/* Partial redundancy elimination / Hoisting for RTL.
   Copyright (C) 1997-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* TODO
   - reordering of memory allocation and freeing to be more space efficient
   - calc rough register pressure information and use the info to drive all
     kinds of code motion (including code hoisting) in a unified way.
*/

/* References searched while implementing this.

   Compilers Principles, Techniques and Tools
   Aho, Sethi, Ullman
   Addison-Wesley, 1988

   Global Optimization by Suppression of Partial Redundancies
   E. Morel, C. Renvoise
   Communications of the ACM, Vol. 22, Num. 2, Feb. 1979

   A Portable Machine-Independent Global Optimizer - Design and Measurements
   Frederick Chow
   Stanford Ph.D. thesis, Dec. 1983

   A Fast Algorithm for Code Movement Optimization
   D.M. Dhamdhere
   SIGPLAN Notices, Vol. 23, Num. 10, Oct. 1988

   A Solution to a Problem with Morel and Renvoise's
   Global Optimization by Suppression of Partial Redundancies
   K-H Drechsler, M.P. Stadel
   ACM TOPLAS, Vol. 10, Num. 4, Oct. 1988

   Practical Adaptation of the Global Optimization
   Algorithm of Morel and Renvoise
   D.M. Dhamdhere
   ACM TOPLAS, Vol. 13, Num. 2, Apr. 1991

   Efficiently Computing Static Single Assignment Form and the Control
   Dependence Graph
   R. Cytron, J. Ferrante, B.K. Rosen, M.N. Wegman, and F.K. Zadeck
   ACM TOPLAS, Vol. 13, Num. 4, Oct. 1991

   Lazy Code Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   What's In a Region? Or Computing Control Dependence Regions in Near-Linear
   Time for Reducible Flow Control
   Thomas Ball
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   An Efficient Representation for Sparse Sets
   Preston Briggs, Linda Torczon
   ACM Letters on Programming Languages and Systems,
   Vol. 2, Num. 1-4, Mar-Dec 1993

   A Variation of Knoop, Ruthing, and Steffen's Lazy Code Motion
   K-H Drechsler, M.P. Stadel
   ACM SIGPLAN Notices, Vol. 28, Num. 5, May 1993

   Partial Dead Code Elimination
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Effective Partial Redundancy Elimination
   P. Briggs, K.D. Cooper
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   The Program Structure Tree: Computing Control Regions in Linear Time
   R. Johnson, D. Pearson, K. Pingali
   ACM SIGPLAN Notices, Vol. 29, Num. 6, Jun. 1994

   Optimal Code Motion: Theory and Practice
   J. Knoop, O. Ruthing, B. Steffen
   ACM TOPLAS, Vol. 16, Num. 4, Jul. 1994

   The Power of Assignment Motion
   J. Knoop, O. Ruthing, B. Steffen
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Global Code Motion / Global Value Numbering
   C. Click
   ACM SIGPLAN Notices Vol. 30, Num. 6, Jun. 1995, '95 Conference on PLDI

   Value Driven Redundancy Elimination
   L.T. Simpson
   Rice University Ph.D. thesis, Apr. 1996

   Value Numbering
   L.T. Simpson
   Massively Scalar Compiler Project, Rice University, Sep. 1996

   High Performance Compilers for Parallel Computing
   Michael Wolfe
   Addison-Wesley, 1996

   Advanced Compiler Design and Implementation
   Steven Muchnick
   Morgan Kaufmann, 1997

   Building an Optimizing Compiler
   Robert Morgan
   Digital Press, 1998

   People wishing to speed up the code here should read:
     Elimination Algorithms for Data Flow Analysis
     B.G. Ryder, M.C. Paull
     ACM Computing Surveys, Vol. 18, Num. 3, Sep. 1986

     How to Analyze Large Programs Efficiently and Informatively
     D.M. Dhamdhere, B.K. Rosen, F.K. Zadeck
     ACM SIGPLAN Notices Vol. 27, Num. 7, Jul. 1992, '92 Conference on PLDI

   People wishing to do something different can find various possibilities
   in the above papers and elsewhere.
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "print-rtl.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgcleanup.h"
#include "expr.h"
#include "intl.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "gcse.h"
#include "gcse-common.h"
#include "function-abi.h"

/* We support GCSE via Partial Redundancy Elimination.  PRE optimizations
   are a superset of those done by classic GCSE.

   Two passes of copy/constant propagation are done around PRE or hoisting
   because the first one enables more GCSE and the second one helps to clean
   up the copies that PRE and HOIST create.  This is needed more for PRE than
   for HOIST because code hoisting will try to use an existing register
   containing the common subexpression rather than create a new one.  This is
   harder to do for PRE because of the code motion (which HOIST doesn't do).

   Expressions we are interested in GCSE-ing are of the form
   (set (pseudo-reg) (expression)).
   Function want_to_gcse_p says what these are.

   In addition, expressions in REG_EQUAL notes are candidates for GCSE-ing.
   This allows PRE to hoist expressions that are expressed in multiple insns,
   such as complex address calculations (e.g. for PIC code, or loads with a
   high part and a low part).

   PRE handles moving invariant expressions out of loops (by treating them as
   partially redundant).
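
   For example (a hypothetical source-level view; the pass itself works
   on RTL):

	if (c)
	  x = a + b;	// 'a + b' is partially redundant here...
	y = a + b;	// ...it is recomputed on the path through the 'if'

   becomes

	if (c)
	  { t = a + b; x = t; }
	else
	  t = a + b;	// inserted copy: 'a + b' is now fully redundant
	y = t;		// redundant computation replaced by the pseudo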

   **********************

   We used to support multiple passes but there are diminishing returns in
   doing so.  The first pass usually makes 90% of the changes that are doable.
   A second pass can make a few more changes made possible by the first pass.
   Experiments show any further passes don't make enough changes to justify
   the expense.

   A study of spec92 using an unlimited number of passes:
   [1 pass] = 1208 substitutions, [2] = 577, [3] = 202, [4] = 192, [5] = 83,
   [6] = 34, [7] = 17, [8] = 9, [9] = 4, [10] = 4, [11] = 2,
   [12] = 2, [13] = 1, [15] = 1, [16] = 2, [41] = 1

   It was found that doing copy propagation between each pass enables further
   substitutions.

   This study was done before expressions in REG_EQUAL notes were added as
   candidate expressions for optimization, and before the GIMPLE optimizers
   were added.  Multiple passes are probably even less efficient now than
   at the time when the study was conducted.

   PRE is quite expensive in complicated functions because the DFA can take
   a while to converge.  Hence we only perform one pass.

   **********************

   The steps for PRE are:

   1) Build the hash table of expressions we wish to GCSE (expr_hash_table).

   2) Perform the data flow analysis for PRE.

   3) Delete the redundant instructions.

   4) Insert the required copies [if any] that make the partially
      redundant instructions fully redundant.

   5) For other reaching expressions, insert an instruction to copy the value
      to a newly created pseudo that will reach the redundant instruction.

   The deletion is done first so that when we do insertions we
   know which pseudo reg to use.
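
   As a schematic illustration (not actual pass output): if some block
   computes (set (reg 100) (plus (reg 10) (reg 11))) redundantly, the
   deletion step allocates a new pseudo, say (reg 200), as the
   expression's reaching_reg and replaces the redundant insn with the
   copy (set (reg 100) (reg 200)); the insertion steps then add
   (set (reg 200) (plus (reg 10) (reg 11))) on the paths that lacked
   the computation, so (reg 200) is defined on every path.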

   Various papers have argued that PRE DFA is expensive (O(n^2)) and others
   argue it is not.  The number of iterations for the algorithm to converge
   is typically 2-4 so I don't view it as that expensive (relatively speaking).

   PRE GCSE depends heavily on the second CPROP pass to clean up the copies
   we create.  To make an expression reach the place where it's redundant,
   the result of the expression is copied to a new register, and the redundant
   expression is deleted by replacing it with this new register.  Classic GCSE
   doesn't have this problem as much because it computes the reaching defs of
   each register in each block and thus can try to use an existing
   register.  */

/* GCSE global vars.  */

struct target_gcse default_target_gcse;
#if SWITCHABLE_TARGET
struct target_gcse *this_target_gcse = &default_target_gcse;
#endif

/* Set to nonzero if CSE should run after all GCSE optimizations are done.  */
int flag_rerun_cse_after_global_opts;

/* An obstack for our working variables.  */
static struct obstack gcse_obstack;

/* Hash table of expressions.  */

struct gcse_expr
{
  /* The expression.  */
  rtx expr;
  /* Index in the available expression bitmaps.  */
  int bitmap_index;
  /* Next entry with the same hash.  */
  struct gcse_expr *next_same_hash;
  /* List of anticipatable occurrences in basic blocks in the function.
     An "anticipatable occurrence" is one that is the first occurrence in the
     basic block, the operands are not modified in the basic block prior
     to the occurrence and the output is not used between the start of
     the block and the occurrence.  */
  struct gcse_occr *antic_occr;
  /* List of available occurrences in basic blocks in the function.
     An "available occurrence" is one that is the last occurrence in the
     basic block and the operands are not modified by following statements in
     the basic block [including this insn].  */
  struct gcse_occr *avail_occr;
  /* Non-null if the computation is PRE redundant.
     The value is the newly created pseudo-reg to record a copy of the
     expression in all the places that reach the redundant copy.  */
  rtx reaching_reg;
  /* Maximum distance in instructions this expression can travel.
     We avoid moving simple expressions for more than a few instructions
     to keep register pressure under control.
     A value of "0" removes restrictions on how far the expression can
     travel.  */
  HOST_WIDE_INT max_distance;
};
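
/* For illustration (schematic, not pass output), within one basic block

	t1 = a + b;	<- anticipatable occurrence of 'a + b'
	a  = ...;
	t2 = a + b;	<- available occurrence of 'a + b'

   the first computation would yield the same value at the start of the
   block, so it can be moved up; only the second yields the right value
   at the block's end, because 'a' is modified in between.  */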

/* Occurrence of an expression.
   There is one per basic block.  If a pattern appears more than once the
   last appearance is used [or first for anticipatable expressions].  */

struct gcse_occr
{
  /* Next occurrence of this expression.  */
  struct gcse_occr *next;
  /* The insn that computes the expression.  */
  rtx_insn *insn;
  /* Nonzero if this [anticipatable] occurrence has been deleted.  */
  char deleted_p;
  /* Nonzero if this [available] occurrence has been copied to
     reaching_reg.  */
  /* ??? This is mutually exclusive with deleted_p, so they could share
     the same byte.  */
  char copied_p;
};

typedef struct gcse_occr *occr_t;

/* Expression hash tables.
   Each hash table is an array of buckets.
   ??? It is known that if it were an array of entries, structure elements
   `next_same_hash' and `bitmap_index' wouldn't be necessary.  However, it is
   not clear whether in the final analysis a sufficient amount of memory would
   be saved as the size of the available expression bitmaps would be larger
   [one could build a mapping table without holes afterwards though].
   Someday I'll perform the computation and figure it out.  */

struct gcse_hash_table_d
{
  /* The table itself.
     This is an array of `expr_hash_table_size' elements.  */
  struct gcse_expr **table;

  /* Size of the hash table, in elements.  */
  unsigned int size;

  /* Number of hash table elements.  */
  unsigned int n_elems;
};

/* Expression hash table.  */
static struct gcse_hash_table_d expr_hash_table;

/* This is a list of expressions which are MEMs and will be used by load
   or store motion.
   Load motion tracks MEMs which aren't killed by anything except itself,
   i.e. loads and stores to a single location.
   We can then allow movement of these MEM refs with a little special
   allowance.  (All stores copy the same value to the reaching reg used
   for the loads.)  This means all values used to store into memory must have
   no side effects so we can re-issue the setter value.  */

struct ls_expr
{
  struct gcse_expr *expr;	/* Gcse expression reference for LM.  */
  rtx pattern;			/* Pattern of this mem.  */
  rtx pattern_regs;		/* List of registers mentioned by the mem.  */
  vec<rtx_insn *> stores;	/* INSN list of stores seen.  */
  struct ls_expr *next;		/* Next in the list.  */
  int invalid;			/* Invalid for some reason.  */
  int index;			/* If it maps to a bitmap index.  */
  unsigned int hash_index;	/* Index when in a hash table.  */
  rtx reaching_reg;		/* Register to use when re-writing.  */
};

/* Head of the list of load/store memory refs.  */
static struct ls_expr *pre_ldst_mems = NULL;

struct pre_ldst_expr_hasher : nofree_ptr_hash <ls_expr>
{
  typedef value_type compare_type;
  static inline hashval_t hash (const ls_expr *);
  static inline bool equal (const ls_expr *, const ls_expr *);
};

/* Hashtable helpers.  */

inline hashval_t
pre_ldst_expr_hasher::hash (const ls_expr *x)
{
  int do_not_record_p = 0;
  return
    hash_rtx (x->pattern, GET_MODE (x->pattern), &do_not_record_p, NULL, false);
}

static int expr_equiv_p (const_rtx, const_rtx);

inline bool
pre_ldst_expr_hasher::equal (const ls_expr *ptr1,
                             const ls_expr *ptr2)
{
  return expr_equiv_p (ptr1->pattern, ptr2->pattern);
}

/* Hashtable for the load/store memory refs.  */
static hash_table<pre_ldst_expr_hasher> *pre_ldst_table;

/* Bitmap containing one bit for each register in the program.
   Used when performing GCSE to track which registers have been set since
   the start of the basic block.  */
static regset reg_set_bitmap;

/* Array, indexed by basic block number for a list of insns which modify
   memory within that block.  */
static vec<rtx_insn *> *modify_mem_list;
static bitmap modify_mem_list_set;

/* This array parallels modify_mem_list, except that it stores MEMs
   being set and their canonicalized memory addresses.  */
static vec<modify_pair> *canon_modify_mem_list;

/* Bitmap indexed by block numbers to record which blocks contain
   function calls.  */
static bitmap blocks_with_calls;

/* Various variables for statistics gathering.  */

/* Memory used in a pass.
   This isn't intended to be absolutely precise.  Its intent is only
   to keep an eye on memory usage.  */
static int bytes_used;

/* GCSE substitutions made.  */
static int gcse_subst_count;
/* Number of copy instructions created.  */
static int gcse_create_count;

/* Doing code hoisting.  */
static bool doing_code_hoisting_p = false;

/* For available exprs.  */
static sbitmap *ae_kill;

/* Data stored for each basic block.  */
struct bb_data
{
  /* Maximal register pressure inside basic block for given register class
     (defined only for the pressure classes).  */
  int max_reg_pressure[N_REG_CLASSES];
  /* Recorded register pressure of basic block before trying to hoist
     an expression.  Will be used to restore the register pressure
     if the expression should not be hoisted.  */
  int old_pressure;
  /* Recorded register live_in info of basic block during code hoisting
     process.  BACKUP is used to record live_in info before trying to
     hoist an expression, and will be used to restore LIVE_IN if the
     expression should not be hoisted.  */
  bitmap live_in, backup;
};

#define BB_DATA(bb) ((struct bb_data *) (bb)->aux)

static basic_block curr_bb;

/* Current register pressure for each pressure class.  */
static int curr_reg_pressure[N_REG_CLASSES];

static void compute_can_copy (void);
static void *gmalloc (size_t) ATTRIBUTE_MALLOC;
static void *gcalloc (size_t, size_t) ATTRIBUTE_MALLOC;
static void *gcse_alloc (unsigned long);
static void alloc_gcse_mem (void);
static void free_gcse_mem (void);
static void hash_scan_insn (rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_set (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_clobber (rtx, rtx_insn *, struct gcse_hash_table_d *);
static void hash_scan_call (rtx, rtx_insn *, struct gcse_hash_table_d *);
static int oprs_unchanged_p (const_rtx, const rtx_insn *, int);
static int oprs_anticipatable_p (const_rtx, const rtx_insn *);
static int oprs_available_p (const_rtx, const rtx_insn *);
static void insert_expr_in_table (rtx, machine_mode, rtx_insn *, int, int,
                                  HOST_WIDE_INT, struct gcse_hash_table_d *);
static unsigned int hash_expr (const_rtx, machine_mode, int *, int);
static void record_last_reg_set_info (rtx_insn *, int);
static void record_last_mem_set_info (rtx_insn *);
static void record_last_set_info (rtx, const_rtx, void *);
static void compute_hash_table (struct gcse_hash_table_d *);
static void alloc_hash_table (struct gcse_hash_table_d *);
static void free_hash_table (struct gcse_hash_table_d *);
static void compute_hash_table_work (struct gcse_hash_table_d *);
static void dump_hash_table (FILE *, const char *, struct gcse_hash_table_d *);
static void compute_local_properties (sbitmap *, sbitmap *, sbitmap *,
                                      struct gcse_hash_table_d *);
static void mems_conflict_for_gcse_p (rtx, const_rtx, void *);
static int load_killed_in_block_p (const_basic_block, int, const_rtx, int);
static void alloc_pre_mem (int, int);
static void free_pre_mem (void);
static struct edge_list *compute_pre_data (void);
static int pre_expr_reaches_here_p (basic_block, struct gcse_expr *,
                                    basic_block);
static void insert_insn_end_basic_block (struct gcse_expr *, basic_block);
static void pre_insert_copy_insn (struct gcse_expr *, rtx_insn *);
static void pre_insert_copies (void);
static int pre_delete (void);
static int pre_gcse (struct edge_list *);
static int one_pre_gcse_pass (void);
static void add_label_notes (rtx, rtx_insn *);
static void alloc_code_hoist_mem (int, int);
static void free_code_hoist_mem (void);
static void compute_code_hoist_vbeinout (void);
static void compute_code_hoist_data (void);
static int should_hoist_expr_to_dom (basic_block, struct gcse_expr *,
                                     basic_block,
                                     sbitmap, HOST_WIDE_INT, int *,
                                     enum reg_class,
                                     int *, bitmap, rtx_insn *);
static int hoist_code (void);
static enum reg_class get_regno_pressure_class (int regno, int *nregs);
static enum reg_class get_pressure_class_and_nregs (rtx_insn *insn, int *nregs);
static int one_code_hoisting_pass (void);
static rtx_insn *process_insert_insn (struct gcse_expr *);
static int pre_edge_insert (struct edge_list *, struct gcse_expr **);
static int pre_expr_reaches_here_p_work (basic_block, struct gcse_expr *,
                                         basic_block, char *);
static struct ls_expr *ldst_entry (rtx);
static void free_ldst_entry (struct ls_expr *);
static void free_ld_motion_mems (void);
static void print_ldst_list (FILE *);
static struct ls_expr *find_rtx_in_ldst (rtx);
static int simple_mem (const_rtx);
static void invalidate_any_buried_refs (rtx);
static void compute_ld_motion_mems (void);
static void trim_ld_motion_mems (void);
static void update_ld_motion_stores (struct gcse_expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);

#define GNEW(T)			((T *) gmalloc (sizeof (T)))
#define GCNEW(T)		((T *) gcalloc (1, sizeof (T)))

#define GNEWVEC(T, N)		((T *) gmalloc (sizeof (T) * (N)))
#define GCNEWVEC(T, N)		((T *) gcalloc ((N), sizeof (T)))

#define GNEWVAR(T, S)		((T *) gmalloc ((S)))
#define GCNEWVAR(T, S)		((T *) gcalloc (1, (S)))

#define GOBNEW(T)		((T *) gcse_alloc (sizeof (T)))
#define GOBNEWVAR(T, S)		((T *) gcse_alloc ((S)))

/* Misc. utilities.  */

#define can_copy \
  (this_target_gcse->x_can_copy)
#define can_copy_init_p \
  (this_target_gcse->x_can_copy_init_p)

/* Compute which modes support reg/reg copy operations.  */

static void
compute_can_copy (void)
{
  int i;
#ifndef AVOID_CCMODE_COPIES
  rtx reg;
  rtx_insn *insn;
#endif
  memset (can_copy, 0, NUM_MACHINE_MODES);

  start_sequence ();
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    if (GET_MODE_CLASS (i) == MODE_CC)
      {
#ifdef AVOID_CCMODE_COPIES
        can_copy[i] = 0;
#else
        reg = gen_rtx_REG ((machine_mode) i, LAST_VIRTUAL_REGISTER + 1);
        insn = emit_insn (gen_rtx_SET (reg, reg));
        if (recog (PATTERN (insn), insn, NULL) >= 0)
          can_copy[i] = 1;
#endif
      }
    else
      can_copy[i] = 1;

  end_sequence ();
}

/* Returns whether the mode supports reg/reg copy operations.  */

bool
can_copy_p (machine_mode mode)
{
  if (! can_copy_init_p)
    {
      compute_can_copy ();
      can_copy_init_p = true;
    }

  return can_copy[mode] != 0;
}
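
/* Usage sketch (illustrative): before recording a (set (reg) ...) for
   GCSE, passes guard on the destination mode, e.g.

     if (can_copy_p (GET_MODE (dest)))
       ... record the expression in the hash table ...

   since an expression is only worth tracking if its result can later be
   copied into a new pseudo of the same mode; hash_scan_set below does
   exactly this.  */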

/* Cover function to xmalloc to record bytes allocated.  */

static void *
gmalloc (size_t size)
{
  bytes_used += size;
  return xmalloc (size);
}

/* Cover function to xcalloc to record bytes allocated.  */

static void *
gcalloc (size_t nelem, size_t elsize)
{
  bytes_used += nelem * elsize;
  return xcalloc (nelem, elsize);
}

/* Cover function to obstack_alloc.  */

static void *
gcse_alloc (unsigned long size)
{
  bytes_used += size;
  return obstack_alloc (&gcse_obstack, size);
}

/* Allocate memory for the reg/memory set tracking tables.
   This is called at the start of each pass.  */

static void
alloc_gcse_mem (void)
{
  /* Allocate vars to track sets of regs.  */
  reg_set_bitmap = ALLOC_REG_SET (NULL);

  /* Allocate array to keep a list of insns which modify memory in each
     basic block.  The two typedefs are needed to work around the
     pre-processor limitation with template types in macro arguments.  */
  typedef vec<rtx_insn *> vec_rtx_heap;
  typedef vec<modify_pair> vec_modify_pair_heap;
  modify_mem_list = GCNEWVEC (vec_rtx_heap, last_basic_block_for_fn (cfun));
  canon_modify_mem_list = GCNEWVEC (vec_modify_pair_heap,
                                    last_basic_block_for_fn (cfun));
  modify_mem_list_set = BITMAP_ALLOC (NULL);
  blocks_with_calls = BITMAP_ALLOC (NULL);
}

/* Free memory allocated by alloc_gcse_mem.  */

static void
free_gcse_mem (void)
{
  FREE_REG_SET (reg_set_bitmap);

  free_modify_mem_tables ();
  BITMAP_FREE (modify_mem_list_set);
  BITMAP_FREE (blocks_with_calls);
}

/* Compute the local properties of each recorded expression.

   Local properties are those that are defined by the block, irrespective of
   other blocks.

   An expression is transparent in a block if its operands are not modified
   in the block.

   An expression is computed (locally available) in a block if it is computed
   at least once and the expression would contain the same value if the
   computation was moved to the end of the block.

   An expression is locally anticipatable in a block if it is computed at
   least once and the expression would contain the same value if the
   computation was moved to the beginning of the block.
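
   For example (schematic), in a block containing

	t1 = a + b;
	a = ...;

   'a + b' is locally anticipatable (it is computed first, from values
   live on entry) but neither transparent nor computed: the assignment
   to 'a' kills it before the end of the block.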

   We call this routine for PRE and code hoisting.  Both compute
   basically the same information and thus can easily share this code.

   TRANSP, COMP, and ANTLOC are destination sbitmaps for recording local
   properties.  If NULL, then it is not necessary to compute or record that
   particular property.

   TABLE controls which hash table to look at.  */

static void
compute_local_properties (sbitmap *transp, sbitmap *comp, sbitmap *antloc,
                          struct gcse_hash_table_d *table)
{
  unsigned int i;

  /* Initialize any bitmaps that were passed in.  */
  if (transp)
    {
      bitmap_vector_ones (transp, last_basic_block_for_fn (cfun));
    }

  if (comp)
    bitmap_vector_clear (comp, last_basic_block_for_fn (cfun));
  if (antloc)
    bitmap_vector_clear (antloc, last_basic_block_for_fn (cfun));

  for (i = 0; i < table->size; i++)
    {
      struct gcse_expr *expr;

      for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
        {
          int indx = expr->bitmap_index;
          struct gcse_occr *occr;

          /* The expression is transparent in this block if it is not killed.
             We start by assuming all are transparent [none are killed], and
             then reset the bits for those that are.  */
          if (transp)
            compute_transp (expr->expr, indx, transp,
                            blocks_with_calls,
                            modify_mem_list_set,
                            canon_modify_mem_list);

          /* The occurrences recorded in antic_occr are exactly those that
             we want to set to nonzero in ANTLOC.  */
          if (antloc)
            for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
              {
                bitmap_set_bit (antloc[BLOCK_FOR_INSN (occr->insn)->index], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->deleted_p = 0;
              }

          /* The occurrences recorded in avail_occr are exactly those that
             we want to set to nonzero in COMP.  */
          if (comp)
            for (occr = expr->avail_occr; occr != NULL; occr = occr->next)
              {
                bitmap_set_bit (comp[BLOCK_FOR_INSN (occr->insn)->index], indx);

                /* While we're scanning the table, this is a good place to
                   initialize this.  */
                occr->copied_p = 0;
              }

          /* While we're scanning the table, this is a good place to
             initialize this.  */
          expr->reaching_reg = 0;
        }
    }
}

/* Hash table support.  */

struct reg_avail_info
{
  basic_block last_bb;
  int first_set;
  int last_set;
};

static struct reg_avail_info *reg_avail_info;
static basic_block current_bb;

/* See whether X, the source of a set, is something we want to consider for
   GCSE.  */

static int
want_to_gcse_p (rtx x, machine_mode mode, HOST_WIDE_INT *max_distance_ptr)
{
#ifdef STACK_REGS
  /* On register stack architectures, don't GCSE constants from the
     constant pool, as the benefits are often swamped by the overhead
     of shuffling the register stack between basic blocks.  */
  if (IS_STACK_MODE (GET_MODE (x)))
    x = avoid_constant_pool_reference (x);
#endif

  /* GCSE'ing constants:

     We do not specifically distinguish between constant and non-constant
     expressions in PRE and Hoist.  We use set_src_cost below to limit
     the maximum distance simple expressions can travel.

     Nevertheless, constants are much easier to GCSE, and, hence,
     it is easy to overdo the optimizations.  Usually, excessive PRE and
     Hoisting of constants leads to increased register pressure.

     RA can deal with this by rematerializing some of the constants.
     Therefore, it is important that the back-end generates sets of constants
     in a way that allows reload to rematerialize them under high register
     pressure, i.e., a pseudo register with REG_EQUAL to constant
     is set only once.  Failing to do so will result in IRA/reload
     spilling such constants under high register pressure instead of
     rematerializing them.  */

  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
    case CALL:
      return 0;

    CASE_CONST_ANY:
      if (!doing_code_hoisting_p)
        /* Do not PRE constants.  */
        return 0;

      /* FALLTHRU */

    default:
      if (doing_code_hoisting_p)
        /* PRE doesn't implement max_distance restriction.  */
        {
          int cost;
          HOST_WIDE_INT max_distance;

          gcc_assert (!optimize_function_for_speed_p (cfun)
                      && optimize_function_for_size_p (cfun));
          cost = set_src_cost (x, mode, 0);

          if (cost < COSTS_N_INSNS (param_gcse_unrestricted_cost))
            {
              max_distance
                = ((HOST_WIDE_INT) param_gcse_cost_distance_ratio * cost) / 10;
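              /* E.g. with the default --param gcse-cost-distance-ratio
                 of 10, MAX_DISTANCE works out to the set_src_cost of the
                 expression (this assumes the default; the ratio is a
                 tunable parameter).  */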
              if (max_distance == 0)
                return 0;

              gcc_assert (max_distance > 0);
            }
          else
            max_distance = 0;

          if (max_distance_ptr)
            *max_distance_ptr = max_distance;
        }

      return can_assign_to_reg_without_clobbers_p (x, mode);
    }
}

/* Used internally by can_assign_to_reg_without_clobbers_p.  */

static GTY(()) rtx_insn *test_insn;

/* Return true if we can assign X to a pseudo register of mode MODE
   such that the resulting insn does not result in clobbering a hard
   register as a side-effect.

   Additionally, if the target requires it, check that the resulting insn
   can be copied.  If it cannot, this means that X is special and probably
   has hidden side-effects we don't want to mess with.

   This function is typically used by code motion passes, to verify
   that it is safe to insert an insn without worrying about clobbering
   maybe live hard regs.  */

bool
can_assign_to_reg_without_clobbers_p (rtx x, machine_mode mode)
{
  int num_clobbers = 0;
  int icode;
  bool can_assign = false;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, mode))
    return true;
  else if (GET_MODE (x) == VOIDmode)
    return false;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (test_insn == 0)
    {
      test_insn
        = make_insn_raw (gen_rtx_SET (gen_rtx_REG (word_mode,
                                                   FIRST_PSEUDO_REGISTER * 2),
                                      const0_rtx));
      SET_NEXT_INSN (test_insn) = SET_PREV_INSN (test_insn) = 0;
      INSN_LOCATION (test_insn) = UNKNOWN_LOCATION;
    }

  /* Now make an insn like the one we would make when GCSE'ing and see if
     valid.  */
  PUT_MODE (SET_DEST (PATTERN (test_insn)), mode);
  SET_SRC (PATTERN (test_insn)) = x;

  icode = recog (PATTERN (test_insn), test_insn, &num_clobbers);

  /* If the test insn is valid and doesn't need clobbers, and the target also
     has no objections, we're good.  */
  if (icode >= 0
      && (num_clobbers == 0 || !added_clobbers_hard_reg_p (icode))
      && ! (targetm.cannot_copy_insn_p
            && targetm.cannot_copy_insn_p (test_insn)))
    can_assign = true;

  /* Make sure test_insn doesn't have any pointers into GC space.  */
  SET_SRC (PATTERN (test_insn)) = NULL_RTX;

  return can_assign;
}

/* Return nonzero if the operands of expression X are unchanged from the
   start of INSN's basic block up to but not including INSN (if AVAIL_P == 0),
   or from INSN to the end of INSN's basic block (if AVAIL_P != 0).  */

static int
oprs_unchanged_p (const_rtx x, const rtx_insn *insn, int avail_p)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return 1;

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        struct reg_avail_info *info = &reg_avail_info[REGNO (x)];

        if (info->last_bb != current_bb)
          return 1;
        if (avail_p)
          return info->last_set < DF_INSN_LUID (insn);
        else
          return info->first_set >= DF_INSN_LUID (insn);
      }

    case MEM:
      if (! flag_gcse_lm
          || load_killed_in_block_p (current_bb, DF_INSN_LUID (insn),
                                     x, avail_p))
        return 0;
      else
        return oprs_unchanged_p (XEXP (x, 0), insn, avail_p);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return 0;

    case PC:
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return 1;

    default:
      break;
    }

  for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          /* If we are about to do the last recursive call needed at this
             level, change it into iteration.  This function is called enough
             to be worth it.  */
          if (i == 0)
            return oprs_unchanged_p (XEXP (x, i), insn, avail_p);

          else if (! oprs_unchanged_p (XEXP (x, i), insn, avail_p))
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (! oprs_unchanged_p (XVECEXP (x, i, j), insn, avail_p))
            return 0;
    }

  return 1;
}

/* Info passed from load_killed_in_block_p to mems_conflict_for_gcse_p.  */

struct mem_conflict_info
{
  /* A memory reference for a load instruction, mems_conflict_for_gcse_p will
     see if a memory store conflicts with this memory load.  */
  const_rtx mem;

  /* True if mems_conflict_for_gcse_p finds a conflict between two memory
     references.  */
  bool conflict;
};

/* DEST is the output of an instruction.  If it is a memory reference and
   possibly conflicts with the load found in DATA, then communicate this
   information back through DATA.  */

static void
mems_conflict_for_gcse_p (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
                          void *data)
{
  struct mem_conflict_info *mci = (struct mem_conflict_info *) data;

  while (GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT
         || GET_CODE (dest) == STRICT_LOW_PART)
    dest = XEXP (dest, 0);

  /* If DEST is not a MEM, then it will not conflict with the load.  Note
     that function calls are assumed to clobber memory, but are handled
     elsewhere.  */
  if (! MEM_P (dest))
    return;

  /* If we are setting a MEM in our list of specially recognized MEMs,
     don't mark as killed this time.  */
  if (pre_ldst_mems != NULL && expr_equiv_p (dest, mci->mem))
    {
      if (!find_rtx_in_ldst (dest))
        mci->conflict = true;
      return;
    }

  if (true_dependence (dest, GET_MODE (dest), mci->mem))
    mci->conflict = true;
}

/* Return nonzero if the expression in X (a memory reference) is killed
   in block BB before or after the insn with the LUID in UID_LIMIT.
   AVAIL_P is nonzero for kills after UID_LIMIT, and zero for kills
   before UID_LIMIT.

   To check the entire block, set UID_LIMIT to max_uid + 1 and
   AVAIL_P to 0.  */

static int
load_killed_in_block_p (const_basic_block bb, int uid_limit, const_rtx x,
                        int avail_p)
{
  vec<rtx_insn *> list = modify_mem_list[bb->index];
  rtx_insn *setter;
  unsigned ix;

  /* If this is a readonly MEM, then we aren't going to be changing it.  */
  if (MEM_READONLY_P (x))
    return 0;

  FOR_EACH_VEC_ELT_REVERSE (list, ix, setter)
    {
      struct mem_conflict_info mci;

      /* Ignore entries in the list that do not apply.  */
      if ((avail_p
           && DF_INSN_LUID (setter) < uid_limit)
          || (! avail_p
              && DF_INSN_LUID (setter) > uid_limit))
        continue;

      /* If SETTER is a call everything is clobbered.  Note that calls
         to pure functions are never put on the list, so we need not
         worry about them.  */
      if (CALL_P (setter))
        return 1;

      /* SETTER must be an INSN of some kind that sets memory.  Call
         note_stores to examine each hunk of memory that is modified.  */
      mci.mem = x;
      mci.conflict = false;
      note_stores (setter, mems_conflict_for_gcse_p, &mci);
      if (mci.conflict)
        return 1;
    }
  return 0;
}
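
/* Usage sketch (illustrative): to ask whether MEM X may be clobbered
   between INSN and the end of its block, a caller writes

     if (load_killed_in_block_p (bb, DF_INSN_LUID (insn), x, 1))
       ... treat the load as killed by a later store or call ...

   which is exactly how oprs_unchanged_p uses it in its MEM case above.  */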

/* Return nonzero if the operands of expression X are unchanged from
   the start of INSN's basic block up to but not including INSN.  */

static int
oprs_anticipatable_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 0);
}

/* Return nonzero if the operands of expression X are unchanged from
   INSN to the end of INSN's basic block.  */

static int
oprs_available_p (const_rtx x, const rtx_insn *insn)
{
  return oprs_unchanged_p (x, insn, 1);
}

/* Hash expression X.

   MODE is only used if X is a CONST_INT.  DO_NOT_RECORD_P is a boolean
   indicating if a volatile operand is found or if the expression contains
   something we don't want to insert in the table.  HASH_TABLE_SIZE is
   the current size of the hash table to be probed.  */

static unsigned int
hash_expr (const_rtx x, machine_mode mode, int *do_not_record_p,
           int hash_table_size)
{
  unsigned int hash;

  *do_not_record_p = 0;

  hash = hash_rtx (x, mode, do_not_record_p, NULL, /*have_reg_qty=*/false);
  return hash % hash_table_size;
}

/* Return nonzero if X is equivalent to Y.  */

static int
expr_equiv_p (const_rtx x, const_rtx y)
{
  return exp_equiv_p (x, y, 0, true);
}

/* Insert expression X in INSN in the hash TABLE.
   If it is already present, record it as the last occurrence in INSN's
   basic block.

   MODE is the mode of the value X is being stored into.
   It is only used if X is a CONST_INT.

   ANTIC_P is nonzero if X is an anticipatable expression.
   AVAIL_P is nonzero if X is an available expression.

   MAX_DISTANCE is the maximum distance in instructions this expression can
   be moved.  */

static void
insert_expr_in_table (rtx x, machine_mode mode, rtx_insn *insn,
                      int antic_p,
                      int avail_p, HOST_WIDE_INT max_distance,
                      struct gcse_hash_table_d *table)
{
  int found, do_not_record_p;
  unsigned int hash;
  struct gcse_expr *cur_expr, *last_expr = NULL;
  struct gcse_occr *antic_occr, *avail_occr;

  hash = hash_expr (x, mode, &do_not_record_p, table->size);

  /* Do not insert expression in table if it contains volatile operands,
     or if hash_expr determines the expression is something we don't want
     to or can't handle.  */
  if (do_not_record_p)
    return;

  cur_expr = table->table[hash];
  found = 0;

  while (cur_expr && (found = expr_equiv_p (cur_expr->expr, x)) == 0)
    {
      /* If the expression isn't found, save a pointer to the end of
         the list.  */
      last_expr = cur_expr;
      cur_expr = cur_expr->next_same_hash;
    }

  if (! found)
    {
      cur_expr = GOBNEW (struct gcse_expr);
      bytes_used += sizeof (struct gcse_expr);
      if (table->table[hash] == NULL)
        /* This is the first pattern that hashed to this index.  */
        table->table[hash] = cur_expr;
      else
        /* Add EXPR to end of this hash chain.  */
        last_expr->next_same_hash = cur_expr;

      /* Set the fields of the expr element.  */
      cur_expr->expr = x;
      cur_expr->bitmap_index = table->n_elems++;
      cur_expr->next_same_hash = NULL;
      cur_expr->antic_occr = NULL;
      cur_expr->avail_occr = NULL;
      gcc_assert (max_distance >= 0);
      cur_expr->max_distance = max_distance;
    }
  else
    gcc_assert (cur_expr->max_distance == max_distance);

  /* Now record the occurrence(s).  */
  if (antic_p)
    {
      antic_occr = cur_expr->antic_occr;

      if (antic_occr
          && BLOCK_FOR_INSN (antic_occr->insn) != BLOCK_FOR_INSN (insn))
        antic_occr = NULL;

      if (antic_occr)
        /* Found another instance of the expression in the same basic block.
           Prefer the currently recorded one.  We want the first one in the
           block and the block is scanned from start to end.  */
        ; /* nothing to do */
      else
        {
          /* First occurrence of this expression in this basic block.  */
          antic_occr = GOBNEW (struct gcse_occr);
          bytes_used += sizeof (struct gcse_occr);
          antic_occr->insn = insn;
          antic_occr->next = cur_expr->antic_occr;
          antic_occr->deleted_p = 0;
          cur_expr->antic_occr = antic_occr;
        }
    }

  if (avail_p)
    {
      avail_occr = cur_expr->avail_occr;

      if (avail_occr
          && BLOCK_FOR_INSN (avail_occr->insn) == BLOCK_FOR_INSN (insn))
        {
          /* Found another instance of the expression in the same basic block.
             Prefer this occurrence to the currently recorded one.  We want
             the last one in the block and the block is scanned from start
             to end.  */
          avail_occr->insn = insn;
        }
      else
        {
          /* First occurrence of this expression in this basic block.  */
          avail_occr = GOBNEW (struct gcse_occr);
          bytes_used += sizeof (struct gcse_occr);
          avail_occr->insn = insn;
          avail_occr->next = cur_expr->avail_occr;
          avail_occr->deleted_p = 0;
          cur_expr->avail_occr = avail_occr;
        }
    }
}

/* Scan SET present in INSN and add an entry to the hash TABLE.  */

static void
hash_scan_set (rtx set, rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx src = SET_SRC (set);
  rtx dest = SET_DEST (set);
  rtx note;

  if (GET_CODE (src) == CALL)
    hash_scan_call (src, insn, table);

  else if (REG_P (dest))
    {
      unsigned int regno = REGNO (dest);
      HOST_WIDE_INT max_distance = 0;

      /* See if a REG_EQUAL note shows this equivalent to a simpler expression.

         This allows us to do a single GCSE pass and still eliminate
         redundant constants, addresses or other expressions that are
         constructed with multiple instructions.

         However, keep the original SRC if INSN is a simple reg-reg move.
         In this case, there will almost always be a REG_EQUAL note on the
         insn that sets SRC.  By recording the REG_EQUAL value here as SRC
         for INSN, we miss copy propagation opportunities and we perform the
         same PRE GCSE operation repeatedly on the same REG_EQUAL value if we
         do more than one PRE GCSE pass.

         Note that this does not impede profitable constant propagations.  We
         "look through" reg-reg sets in lookup_avail_set.  */
      note = find_reg_equal_equiv_note (insn);
      if (note != 0
          && REG_NOTE_KIND (note) == REG_EQUAL
          && !REG_P (src)
          && want_to_gcse_p (XEXP (note, 0), GET_MODE (dest), NULL))
        src = XEXP (note, 0), set = gen_rtx_SET (dest, src);

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
          /* Don't GCSE something if we can't do a reg/reg copy.  */
          && can_copy_p (GET_MODE (dest))
          /* GCSE commonly inserts instruction after the insn.  We can't
             do that easily for EH edges so disable GCSE on these for now.  */
          /* ??? We can now easily create new EH landing pads at the
             gimple level, for splitting edges; there's no reason we
             can't do the same thing at the rtl level.  */
          && !can_throw_internal (insn)
          /* Is SET_SRC something we want to gcse?  */
          && want_to_gcse_p (src, GET_MODE (dest), &max_distance)
          /* Don't CSE a nop.  */
          && ! set_noop_p (set)
          /* Don't GCSE if it has attached REG_EQUIV note.
             At this point, only function parameters should have
             REG_EQUIV notes, and if the argument slot is used somewhere
             explicitly, it means the address of the parameter has been
             taken, so we should not extend the lifetime of the pseudo.  */
          && (note == NULL_RTX || ! MEM_P (XEXP (note, 0))))
        {
          /* An expression is not anticipatable if its operands are
             modified before this insn or if this is not the only SET in
             this insn.  The latter condition does not have to mean that
             SRC itself is not anticipatable, but we just will not be
             able to handle code motion of insns with multiple sets.  */
          int antic_p = oprs_anticipatable_p (src, insn)
                        && !multiple_sets (insn);
          /* An expression is not available if its operands are
             subsequently modified, including this insn.  It's also not
             available if this is a branch, because we can't insert
             a set after the branch.  */
          int avail_p = (oprs_available_p (src, insn)
                         && ! JUMP_P (insn));

          insert_expr_in_table (src, GET_MODE (dest), insn, antic_p, avail_p,
                                max_distance, table);
        }
    }
  /* In case of store we want to consider the memory value as available in
     the REG stored in that memory.  This makes it possible to remove
     loads that are made redundant by stores to the same location.  */
  else if (flag_gcse_las && REG_P (src) && MEM_P (dest))
    {
      unsigned int regno = REGNO (src);
      HOST_WIDE_INT max_distance = 0;

      /* Only record sets of pseudo-regs in the hash table.  */
      if (regno >= FIRST_PSEUDO_REGISTER
          /* Don't GCSE something if we can't do a reg/reg copy.  */
          && can_copy_p (GET_MODE (src))
          /* GCSE commonly inserts instruction after the insn.  We can't
             do that easily for EH edges so disable GCSE on these for now.  */
          && !can_throw_internal (insn)
          /* Is SET_DEST something we want to gcse?  */
          && want_to_gcse_p (dest, GET_MODE (dest), &max_distance)
          /* Don't CSE a nop.  */
          && ! set_noop_p (set)
          /* Don't GCSE if it has attached REG_EQUIV note.
             At this point, only function parameters should have
             REG_EQUIV notes, and if the argument slot is used somewhere
             explicitly, it means the address of the parameter has been
             taken, so we should not extend the lifetime of the pseudo.  */
          && ((note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) == 0
              || ! MEM_P (XEXP (note, 0))))
        {
          /* Stores are never anticipatable.  */
          int antic_p = 0;
          /* An expression is not available if its operands are
             subsequently modified, including this insn.  It's also not
             available if this is a branch, because we can't insert
             a set after the branch.  */
          int avail_p = oprs_available_p (dest, insn) && ! JUMP_P (insn);

          /* Record the memory expression (DEST) in the hash table.  */
          insert_expr_in_table (dest, GET_MODE (dest), insn,
                                antic_p, avail_p, max_distance, table);
        }
    }
}

static void
hash_scan_clobber (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
                   struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

static void
hash_scan_call (rtx x ATTRIBUTE_UNUSED, rtx_insn *insn ATTRIBUTE_UNUSED,
                struct gcse_hash_table_d *table ATTRIBUTE_UNUSED)
{
  /* Currently nothing to do.  */
}

/* Process INSN and add hash table entries as appropriate.  */

static void
hash_scan_insn (rtx_insn *insn, struct gcse_hash_table_d *table)
{
  rtx pat = PATTERN (insn);
  int i;

  /* Pick out the sets of INSN and for other forms of instructions record
     what's been modified.  */

  if (GET_CODE (pat) == SET)
    hash_scan_set (pat, insn, table);

  else if (GET_CODE (pat) == CLOBBER)
    hash_scan_clobber (pat, insn, table);

  else if (GET_CODE (pat) == CALL)
    hash_scan_call (pat, insn, table);

  else if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
        rtx x = XVECEXP (pat, 0, i);

        if (GET_CODE (x) == SET)
          hash_scan_set (x, insn, table);
        else if (GET_CODE (x) == CLOBBER)
          hash_scan_clobber (x, insn, table);
        else if (GET_CODE (x) == CALL)
          hash_scan_call (x, insn, table);
      }
}

/* Dump the hash table TABLE to file FILE under the name NAME.  */

static void
dump_hash_table (FILE *file, const char *name, struct gcse_hash_table_d *table)
{
  int i;
  /* Flattened out table, so it's printed in proper order.  */
  struct gcse_expr **flat_table;
  unsigned int *hash_val;
  struct gcse_expr *expr;

  flat_table = XCNEWVEC (struct gcse_expr *, table->n_elems);
  hash_val = XNEWVEC (unsigned int, table->n_elems);

  for (i = 0; i < (int) table->size; i++)
    for (expr = table->table[i]; expr != NULL; expr = expr->next_same_hash)
      {
        flat_table[expr->bitmap_index] = expr;
        hash_val[expr->bitmap_index] = i;
      }

  fprintf (file, "%s hash table (%d buckets, %d entries)\n",
           name, table->size, table->n_elems);

  for (i = 0; i < (int) table->n_elems; i++)
    if (flat_table[i] != 0)
      {
        expr = flat_table[i];
        fprintf (file, "Index %d (hash value %d; max distance "
                 HOST_WIDE_INT_PRINT_DEC ")\n  ",
                 expr->bitmap_index, hash_val[i], expr->max_distance);
        print_rtl (file, expr->expr);
        fprintf (file, "\n");
      }

  fprintf (file, "\n");

  free (flat_table);
  free (hash_val);
}

/* Record register first/last/block set information for REGNO in INSN.

   first_set records the first place in the block where the register
   is set and is used to compute "anticipatability".

   last_set records the last place in the block where the register
   is set and is used to compute "availability".

   last_bb records the block for which first_set and last_set are
   valid, as a quick test to invalidate them.  */

static void
record_last_reg_set_info (rtx_insn *insn, int regno)
{
  struct reg_avail_info *info = &reg_avail_info[regno];
  int luid = DF_INSN_LUID (insn);

  info->last_set = luid;
  if (info->last_bb != current_bb)
    {
      info->last_bb = current_bb;
      info->first_set = luid;
    }
}

/* Record memory modification information for INSN.  We do not actually care
   about the memory location(s) that are set, or even how they are set
   (consider a CALL_INSN).  We merely need to record which insns modify
   memory.  */

static void
record_last_mem_set_info (rtx_insn *insn)
{
  if (! flag_gcse_lm)
    return;

  record_last_mem_set_info_common (insn, modify_mem_list,
                                   canon_modify_mem_list,
                                   modify_mem_list_set,
                                   blocks_with_calls);
}

/* Called from compute_hash_table via note_stores to handle one
   SET or CLOBBER in an insn.  DATA is really the instruction in which
   the SET is taking place.  */

static void
record_last_set_info (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
{
  rtx_insn *last_set_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (REG_P (dest))
    record_last_reg_set_info (last_set_insn, REGNO (dest));
  else if (MEM_P (dest)
           /* Ignore pushes, they clobber nothing.  */
           && ! push_operand (dest, GET_MODE (dest)))
    record_last_mem_set_info (last_set_insn);
}

/* Top level function to create an expression hash table.

   Expression entries are placed in the hash table if
   - they are of the form (set (pseudo-reg) src),
   - src is something we want to perform GCSE on,
   - none of the operands are subsequently modified in the block.

   Currently src must be a pseudo-reg or a const_int.

   TABLE is the table computed.  */

static void
compute_hash_table_work (struct gcse_hash_table_d *table)
{
  int i;

  /* Re-cache any INSN_LIST nodes we have allocated.  */
  clear_modify_mem_tables ();
  /* Some working arrays used to track first and last set in each block.  */
  reg_avail_info = GNEWVEC (struct reg_avail_info, max_reg_num ());

  for (i = 0; i < max_reg_num (); ++i)
    reg_avail_info[i].last_bb = NULL;

  FOR_EACH_BB_FN (current_bb, cfun)
    {
      rtx_insn *insn;
      unsigned int regno;

      /* First pass over the instructions records information used to
         determine when registers and memory are first and last set.  */
      FOR_BB_INSNS (current_bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          if (CALL_P (insn))
            {
              hard_reg_set_iterator hrsi;

              /* We don't track modes of hard registers, so we need
                 to be conservative and assume that partial kills
                 are full kills.  */
              HARD_REG_SET callee_clobbers
                = insn_callee_abi (insn).full_and_partial_reg_clobbers ();
              EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, regno, hrsi)
                record_last_reg_set_info (insn, regno);

              if (! RTL_CONST_OR_PURE_CALL_P (insn)
                  || RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)
                  || can_throw_external (insn))
                record_last_mem_set_info (insn);
            }

          note_stores (insn, record_last_set_info, insn);
        }

      /* The next pass builds the hash table.  */
      FOR_BB_INSNS (current_bb, insn)
        if (NONDEBUG_INSN_P (insn))
          hash_scan_insn (insn, table);
    }

  free (reg_avail_info);
  reg_avail_info = NULL;
}

/* Allocate space for the set/expr hash TABLE.
   The function's maximum insn count is used to determine the number of
   buckets to use.  */

static void
alloc_hash_table (struct gcse_hash_table_d *table)
{
  int n;

  n = get_max_insn_count ();

  table->size = n / 4;
  if (table->size < 11)
    table->size = 11;

  /* Attempt to maintain efficient use of hash table.
     Making it an odd number is simplest for now.
     ??? Later take some measurements.  */
  table->size |= 1;
  n = table->size * sizeof (struct gcse_expr *);
  table->table = GNEWVAR (struct gcse_expr *, n);
}
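
/* For example, a function with 4000 insns gets 4000/4 = 1000 buckets,
   bumped to the odd 1001; small functions are floored at 11 buckets.  */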
1579
1580 /* Free things allocated by alloc_hash_table. */
1581
1582 static void
free_hash_table(struct gcse_hash_table_d * table)1583 free_hash_table (struct gcse_hash_table_d *table)
1584 {
1585 free (table->table);
1586 }
1587
1588 /* Compute the expression hash table TABLE. */
1589
1590 static void
compute_hash_table(struct gcse_hash_table_d * table)1591 compute_hash_table (struct gcse_hash_table_d *table)
1592 {
1593 /* Initialize count of number of entries in hash table. */
1594 table->n_elems = 0;
1595 memset (table->table, 0, table->size * sizeof (struct gcse_expr *));
1596
1597 compute_hash_table_work (table);
1598 }
1599
1600 /* Expression tracking support. */
1601
1602 /* Clear canon_modify_mem_list and modify_mem_list tables. */
1603 static void
clear_modify_mem_tables(void)1604 clear_modify_mem_tables (void)
1605 {
1606 unsigned i;
1607 bitmap_iterator bi;
1608
1609 EXECUTE_IF_SET_IN_BITMAP (modify_mem_list_set, 0, i, bi)
1610 {
1611 modify_mem_list[i].release ();
1612 canon_modify_mem_list[i].release ();
1613 }
1614 bitmap_clear (modify_mem_list_set);
1615 bitmap_clear (blocks_with_calls);
1616 }
1617
1618 /* Release memory used by modify_mem_list_set. */
1619
1620 static void
free_modify_mem_tables(void)1621 free_modify_mem_tables (void)
1622 {
1623 clear_modify_mem_tables ();
1624 free (modify_mem_list);
1625 free (canon_modify_mem_list);
1626 modify_mem_list = 0;
1627 canon_modify_mem_list = 0;
1628 }
1629
1630 /* Compute PRE+LCM working variables. */
1631
1632 /* Local properties of expressions. */
1633
1634 /* Nonzero for expressions that are transparent in the block. */
1635 static sbitmap *transp;
1636
1637 /* Nonzero for expressions that are computed (available) in the block. */
1638 static sbitmap *comp;
1639
1640 /* Nonzero for expressions that are locally anticipatable in the block. */
1641 static sbitmap *antloc;
1642
1643 /* Nonzero for expressions where this block is an optimal computation
1644 point. */
1645 static sbitmap *pre_optimal;
1646
1647 /* Nonzero for expressions which are redundant in a particular block. */
1648 static sbitmap *pre_redundant;
1649
1650 /* Nonzero for expressions which should be inserted on a specific edge. */
1651 static sbitmap *pre_insert_map;
1652
1653 /* Nonzero for expressions which should be deleted in a specific block. */
1654 static sbitmap *pre_delete_map;
1655
1656 /* Allocate vars used for PRE analysis. */
1657
1658 static void
1659 alloc_pre_mem (int n_blocks, int n_exprs)
1660 {
1661 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
1662 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
1663 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
1664
1665 pre_optimal = NULL;
1666 pre_redundant = NULL;
1667 pre_insert_map = NULL;
1668 pre_delete_map = NULL;
1669 ae_kill = sbitmap_vector_alloc (n_blocks, n_exprs);
1670
1671 /* pre_insert and pre_delete are allocated later. */
1672 }
1673
1674 /* Free vars used for PRE analysis. */
1675
1676 static void
1677 free_pre_mem (void)
1678 {
1679 sbitmap_vector_free (transp);
1680 sbitmap_vector_free (comp);
1681
1682 /* ANTLOC and AE_KILL are freed just after pre_lcm finishes. */
1683
1684 if (pre_optimal)
1685 sbitmap_vector_free (pre_optimal);
1686 if (pre_redundant)
1687 sbitmap_vector_free (pre_redundant);
1688 if (pre_insert_map)
1689 sbitmap_vector_free (pre_insert_map);
1690 if (pre_delete_map)
1691 sbitmap_vector_free (pre_delete_map);
1692
1693 transp = comp = NULL;
1694 pre_optimal = pre_redundant = pre_insert_map = pre_delete_map = NULL;
1695 }
1696
1697 /* Remove certain expressions from anticipatable and transparent
1698 sets of basic blocks that have incoming abnormal edge.
1699 For PRE remove potentially trapping expressions to avoid placing
1700 them on abnormal edges. For hoisting remove memory references that
1701 can be clobbered by calls. */
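/* For example (a sketch): a division that may trap is pruned for PRE so
   that it is never placed on an abnormal edge, while for hoisting a load
   such as (mem (reg 100)) is pruned when an abnormal predecessor ends in
   a call; a constant-pool reference, by contrast, survives the pruning.  */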
1702
1703 static void
1704 prune_expressions (bool pre_p)
1705 {
1706 struct gcse_expr *expr;
1707 unsigned int ui;
1708 basic_block bb;
1709
1710 auto_sbitmap prune_exprs (expr_hash_table.n_elems);
1711 bitmap_clear (prune_exprs);
1712 for (ui = 0; ui < expr_hash_table.size; ui++)
1713 {
1714 for (expr = expr_hash_table.table[ui]; expr; expr = expr->next_same_hash)
1715 {
1716 /* Note potentially trapping expressions. */
1717 if (may_trap_p (expr->expr))
1718 {
1719 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1720 continue;
1721 }
1722
1723 if (!pre_p && contains_mem_rtx_p (expr->expr))
1724 /* Note memory references that can be clobbered by a call.
1725 	       We do not split abnormal edges in hoisting, so if a
1726 	       memory reference were hoisted along an abnormal edge,
1727 	       it would be placed /before/ the call.  Therefore, only
1728 constant memory references can be hoisted along abnormal
1729 edges. */
1730 {
1731 rtx x = expr->expr;
1732
1733 /* Common cases where we might find the MEM which may allow us
1734 to avoid pruning the expression. */
1735 while (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
1736 x = XEXP (x, 0);
1737
1738 /* If we found the MEM, go ahead and look at it to see if it has
1739 properties that allow us to avoid pruning its expression out
1740 of the tables. */
1741 if (MEM_P (x))
1742 {
1743 if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
1744 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
1745 continue;
1746
1747 if (MEM_READONLY_P (x)
1748 && !MEM_VOLATILE_P (x)
1749 && MEM_NOTRAP_P (x))
1750 /* Constant memory reference, e.g., a PIC address. */
1751 continue;
1752 }
1753
1754 /* ??? Optimally, we would use interprocedural alias
1755 analysis to determine if this mem is actually killed
1756 by this call. */
1757
1758 bitmap_set_bit (prune_exprs, expr->bitmap_index);
1759 }
1760 }
1761 }
1762
1763 FOR_EACH_BB_FN (bb, cfun)
1764 {
1765 edge e;
1766 edge_iterator ei;
1767
1768 /* If the current block is the destination of an abnormal edge, we
1769 kill all trapping (for PRE) and memory (for hoist) expressions
1770 because we won't be able to properly place the instruction on
1771 the edge. So make them neither anticipatable nor transparent.
1772 This is fairly conservative.
1773
1774 ??? For hoisting it may be necessary to check for set-and-jump
1775 instructions here, not just for abnormal edges. The general problem
1776 	 is that when an expression cannot be placed right at the end of
1777 	 a basic block we should account for any side-effects of a subsequent
1778 	 jump instruction that could clobber the expression.  It would
1779 be best to implement this check along the lines of
1780 should_hoist_expr_to_dom where the target block is already known
1781 and, hence, there's no need to conservatively prune expressions on
1782 "intermediate" set-and-jump instructions. */
1783 FOR_EACH_EDGE (e, ei, bb->preds)
1784 if ((e->flags & EDGE_ABNORMAL)
1785 && (pre_p || CALL_P (BB_END (e->src))))
1786 {
1787 bitmap_and_compl (antloc[bb->index],
1788 antloc[bb->index], prune_exprs);
1789 bitmap_and_compl (transp[bb->index],
1790 transp[bb->index], prune_exprs);
1791 break;
1792 }
1793 }
1794 }
1795
1796 /* It may be necessary to insert a large number of insns on edges to
1797 make the existing occurrences of expressions fully redundant. This
1798    routine examines the set of insertions and deletions and, if the ratio
1799    of insertions to deletions is too high for a particular expression,
1800    removes the expression from the insertion/deletion sets.
1801
1802 N_ELEMS is the number of elements in the hash table. */
1803
1804 static void
1805 prune_insertions_deletions (int n_elems)
1806 {
1807 sbitmap_iterator sbi;
1808
1809 /* We always use I to iterate over blocks/edges and J to iterate over
1810 expressions. */
1811 unsigned int i, j;
1812
1813 /* Counts for the number of times an expression needs to be inserted and
1814 number of times an expression can be removed as a result. */
1815 int *insertions = GCNEWVEC (int, n_elems);
1816 int *deletions = GCNEWVEC (int, n_elems);
1817
1818 /* Set of expressions which require too many insertions relative to
1819 the number of deletions achieved. We will prune these out of the
1820 insertion/deletion sets. */
1821 auto_sbitmap prune_exprs (n_elems);
1822 bitmap_clear (prune_exprs);
1823
1824 /* Iterate over the edges counting the number of times each expression
1825 needs to be inserted. */
1826 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1827 {
1828 EXECUTE_IF_SET_IN_BITMAP (pre_insert_map[i], 0, j, sbi)
1829 insertions[j]++;
1830 }
1831
1832 /* Similarly for deletions, but those occur in blocks rather than on
1833 edges. */
1834 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1835 {
1836 EXECUTE_IF_SET_IN_BITMAP (pre_delete_map[i], 0, j, sbi)
1837 deletions[j]++;
1838 }
1839
1840 /* Now that we have accurate counts, iterate over the elements in the
1841 hash table and see if any need too many insertions relative to the
1842 number of evaluations that can be removed. If so, mark them in
1843 PRUNE_EXPRS. */
1844 for (j = 0; j < (unsigned) n_elems; j++)
1845 if (deletions[j]
1846 && (insertions[j] / deletions[j]) > param_max_gcse_insertion_ratio)
1847 bitmap_set_bit (prune_exprs, j);
1848
1849 /* Now prune PRE_INSERT_MAP and PRE_DELETE_MAP based on PRUNE_EXPRS. */
1850 EXECUTE_IF_SET_IN_BITMAP (prune_exprs, 0, j, sbi)
1851 {
1852 for (i = 0; i < (unsigned) n_edges_for_fn (cfun); i++)
1853 bitmap_clear_bit (pre_insert_map[i], j);
1854
1855 for (i = 0; i < (unsigned) last_basic_block_for_fn (cfun); i++)
1856 bitmap_clear_bit (pre_delete_map[i], j);
1857 }
1858
1859 free (insertions);
1860 free (deletions);
1861 }
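
/* A worked example (a sketch, assuming the default value 20 of
   --param max-gcse-insertion-ratio): if making one computation of an
   expression fully redundant would require inserting it on 25 edges,
   then 25 / 1 > 20 and the expression is dropped from both
   PRE_INSERT_MAP and PRE_DELETE_MAP.  */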
1862
1863 /* Top level routine to do the dataflow analysis needed by PRE. */
1864
1865 static struct edge_list *
1866 compute_pre_data (void)
1867 {
1868 struct edge_list *edge_list;
1869 basic_block bb;
1870
1871 compute_local_properties (transp, comp, antloc, &expr_hash_table);
1872 prune_expressions (true);
1873 bitmap_vector_clear (ae_kill, last_basic_block_for_fn (cfun));
1874
1875 /* Compute ae_kill for each basic block using:
1876
1877 ~(TRANSP | COMP)
1878 */
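  /* Equivalently, by De Morgan, AE_KILL = ~TRANSP & ~COMP: an expression
     is killed in a block iff the block neither preserves it (TRANSP) nor
     locally computes it (COMP).  */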
1879
1880 FOR_EACH_BB_FN (bb, cfun)
1881 {
1882 bitmap_ior (ae_kill[bb->index], transp[bb->index], comp[bb->index]);
1883 bitmap_not (ae_kill[bb->index], ae_kill[bb->index]);
1884 }
1885
1886 edge_list = pre_edge_lcm (expr_hash_table.n_elems, transp, comp, antloc,
1887 ae_kill, &pre_insert_map, &pre_delete_map);
1888 sbitmap_vector_free (antloc);
1889 antloc = NULL;
1890 sbitmap_vector_free (ae_kill);
1891 ae_kill = NULL;
1892
1893 prune_insertions_deletions (expr_hash_table.n_elems);
1894
1895 return edge_list;
1896 }
1897
1898 /* PRE utilities */
1899
1900 /* Return nonzero if an occurrence of expression EXPR in OCCR_BB would reach
1901 block BB.
1902
1903 VISITED is a pointer to a working buffer for tracking which BB's have
1904 been visited. It is NULL for the top-level call.
1905
1906 We treat reaching expressions that go through blocks containing the same
1907 reaching expression as "not reaching". E.g. if EXPR is generated in blocks
1908 2 and 3, INSN is in block 4, and 2->3->4, we treat the expression in block
1909 2 as not reaching. The intent is to improve the probability of finding
1910 only one reaching expression and to reduce register lifetimes by picking
1911 the closest such expression. */
1912
1913 static int
1914 pre_expr_reaches_here_p_work (basic_block occr_bb, struct gcse_expr *expr,
1915 basic_block bb, char *visited)
1916 {
1917 edge pred;
1918 edge_iterator ei;
1919
1920 FOR_EACH_EDGE (pred, ei, bb->preds)
1921 {
1922 basic_block pred_bb = pred->src;
1923
1924 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1925 	  /* Has this predecessor already been visited?  */
1926 || visited[pred_bb->index])
1927 ;/* Nothing to do. */
1928
1929 /* Does this predecessor generate this expression? */
1930 else if (bitmap_bit_p (comp[pred_bb->index], expr->bitmap_index))
1931 {
1932 /* Is this the occurrence we're looking for?
1933 Note that there's only one generating occurrence per block
1934 so we just need to check the block number. */
1935 if (occr_bb == pred_bb)
1936 return 1;
1937
1938 visited[pred_bb->index] = 1;
1939 }
1940 /* Ignore this predecessor if it kills the expression. */
1941 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
1942 visited[pred_bb->index] = 1;
1943
1944 /* Neither gen nor kill. */
1945 else
1946 {
1947 visited[pred_bb->index] = 1;
1948 if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
1949 return 1;
1950 }
1951 }
1952
1953 /* All paths have been checked. */
1954 return 0;
1955 }
1956
1957 /* The wrapper for pre_expr_reaches_here_work that ensures that any
1958 memory allocated for that function is returned. */
1959
1960 static int
1961 pre_expr_reaches_here_p (basic_block occr_bb, struct gcse_expr *expr, basic_block bb)
1962 {
1963 int rval;
1964 char *visited = XCNEWVEC (char, last_basic_block_for_fn (cfun));
1965
1966 rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
1967
1968 free (visited);
1969 return rval;
1970 }
1971
1972 /* Generate RTL to copy an EXP to REG and return it. */
1973
1974 rtx_insn *
1975 prepare_copy_insn (rtx reg, rtx exp)
1976 {
1977 rtx_insn *pat;
1978
1979 start_sequence ();
1980
1981 /* If the expression is something that's an operand, like a constant,
1982 just copy it to a register. */
1983 if (general_operand (exp, GET_MODE (reg)))
1984 emit_move_insn (reg, exp);
1985
1986 /* Otherwise, make a new insn to compute this expression and make sure the
1987 insn will be recognized (this also adds any needed CLOBBERs). */
1988 else
1989 {
1990 rtx_insn *insn = emit_insn (gen_rtx_SET (reg, exp));
1991
1992 if (insn_invalid_p (insn, false))
1993 gcc_unreachable ();
1994 }
1995
1996 pat = get_insns ();
1997 end_sequence ();
1998
1999 return pat;
2000 }
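
/* An illustration (a sketch): if EXP is (const_int 42) the sequence is a
   single move REG <- 42; if EXP is (plus (reg 103) (reg 104)) a SET is
   emitted and run through recognition, which may add CLOBBERs (e.g. of a
   flags register) required by the matching target pattern.  */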
2001
2002 /* Generate RTL to copy an EXPR to its `reaching_reg' and return it. */
2003
2004 static rtx_insn *
2005 process_insert_insn (struct gcse_expr *expr)
2006 {
2007 rtx reg = expr->reaching_reg;
2008 /* Copy the expression to make sure we don't have any sharing issues. */
2009 rtx exp = copy_rtx (expr->expr);
2010
2011 return prepare_copy_insn (reg, exp);
2012 }
2013
2014 /* Add EXPR to the end of basic block BB.
2015
2016    This is used by both PRE and code hoisting.  */
2017
2018 static void
2019 insert_insn_end_basic_block (struct gcse_expr *expr, basic_block bb)
2020 {
2021 rtx_insn *insn = BB_END (bb);
2022 rtx_insn *new_insn;
2023 rtx reg = expr->reaching_reg;
2024 int regno = REGNO (reg);
2025 rtx_insn *pat, *pat_end;
2026
2027 pat = process_insert_insn (expr);
2028 gcc_assert (pat && INSN_P (pat));
2029
2030 pat_end = pat;
2031 while (NEXT_INSN (pat_end) != NULL_RTX)
2032 pat_end = NEXT_INSN (pat_end);
2033
2034 /* If the last insn is a jump, insert EXPR in front. Similarly we need to
2035    take care of trapping instructions in the presence of non-call exceptions.  */
2036
2037 if (JUMP_P (insn)
2038 || (NONJUMP_INSN_P (insn)
2039 && (!single_succ_p (bb)
2040 || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
2041 {
2042 /* FIXME: What if something in jump uses value set in new insn? */
2043 new_insn = emit_insn_before_noloc (pat, insn, bb);
2044 }
2045
2046 /* Likewise if the last insn is a call, as will happen in the presence
2047 of exception handling. */
2048 else if (CALL_P (insn)
2049 && (!single_succ_p (bb)
2050 || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
2051 {
2052 /* Keeping in mind targets with small register classes and parameters
2053 in registers, we search backward and place the instructions before
2054 	 the first parameter is loaded.  Do this for all targets for consistency,
2055 	 on the presumption that we'll get better code elsewhere as well.  */
2056
2057 /* Since different machines initialize their parameter registers
2058 in different orders, assume nothing. Collect the set of all
2059 parameter registers. */
2060 insn = find_first_parameter_load (insn, BB_HEAD (bb));
2061
2062 /* If we found all the parameter loads, then we want to insert
2063 before the first parameter load.
2064
2065 If we did not find all the parameter loads, then we might have
2066 stopped on the head of the block, which could be a CODE_LABEL.
2067 If we inserted before the CODE_LABEL, then we would be putting
2068 the insn in the wrong basic block. In that case, put the insn
2069 after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
2070 while (LABEL_P (insn)
2071 || NOTE_INSN_BASIC_BLOCK_P (insn))
2072 insn = NEXT_INSN (insn);
2073
2074 new_insn = emit_insn_before_noloc (pat, insn, bb);
2075 }
2076 else
2077 new_insn = emit_insn_after_noloc (pat, insn, bb);
2078
2079 while (1)
2080 {
2081 if (INSN_P (pat))
2082 add_label_notes (PATTERN (pat), new_insn);
2083 if (pat == pat_end)
2084 break;
2085 pat = NEXT_INSN (pat);
2086 }
2087
2088 gcse_create_count++;
2089
2090 if (dump_file)
2091 {
2092 fprintf (dump_file, "PRE/HOIST: end of bb %d, insn %d, ",
2093 bb->index, INSN_UID (new_insn));
2094 fprintf (dump_file, "copying expression %d to reg %d\n",
2095 expr->bitmap_index, regno);
2096 }
2097 }
2098
2099 /* Insert partially redundant expressions on edges in the CFG to make
2100 the expressions fully redundant. */
2101
2102 static int
2103 pre_edge_insert (struct edge_list *edge_list, struct gcse_expr **index_map)
2104 {
2105 int e, i, j, num_edges, set_size, did_insert = 0;
2106 sbitmap *inserted;
2107
2108 /* Where PRE_INSERT_MAP is nonzero, we add the expression on that edge
2109 if it reaches any of the deleted expressions. */
2110
2111 set_size = pre_insert_map[0]->size;
2112 num_edges = NUM_EDGES (edge_list);
2113 inserted = sbitmap_vector_alloc (num_edges, expr_hash_table.n_elems);
2114 bitmap_vector_clear (inserted, num_edges);
2115
2116 for (e = 0; e < num_edges; e++)
2117 {
2118 int indx;
2119 basic_block bb = INDEX_EDGE_PRED_BB (edge_list, e);
2120
2121 for (i = indx = 0; i < set_size; i++, indx += SBITMAP_ELT_BITS)
2122 {
2123 SBITMAP_ELT_TYPE insert = pre_insert_map[e]->elms[i];
2124
2125 for (j = indx;
2126 insert && j < (int) expr_hash_table.n_elems;
2127 j++, insert >>= 1)
2128 if ((insert & 1) != 0 && index_map[j]->reaching_reg != NULL_RTX)
2129 {
2130 struct gcse_expr *expr = index_map[j];
2131 struct gcse_occr *occr;
2132
2133 /* Now look at each deleted occurrence of this expression. */
2134 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2135 {
2136 if (! occr->deleted_p)
2137 continue;
2138
2139 /* Insert this expression on this edge if it would
2140 reach the deleted occurrence in BB. */
2141 if (!bitmap_bit_p (inserted[e], j))
2142 {
2143 rtx_insn *insn;
2144 edge eg = INDEX_EDGE (edge_list, e);
2145
2146 /* We can't insert anything on an abnormal and
2147 critical edge, so we insert the insn at the end of
2148 the previous block. There are several alternatives
2149 		       detailed in Morgan's book, p. 277 (sec. 10.5), for
2150 handling this situation. This one is easiest for
2151 now. */
2152
2153 if (eg->flags & EDGE_ABNORMAL)
2154 insert_insn_end_basic_block (index_map[j], bb);
2155 else
2156 {
2157 insn = process_insert_insn (index_map[j]);
2158 insert_insn_on_edge (insn, eg);
2159 }
2160
2161 if (dump_file)
2162 {
2163 fprintf (dump_file, "PRE: edge (%d,%d), ",
2164 bb->index,
2165 INDEX_EDGE_SUCC_BB (edge_list, e)->index);
2166 fprintf (dump_file, "copy expression %d\n",
2167 expr->bitmap_index);
2168 }
2169
2170 update_ld_motion_stores (expr);
2171 bitmap_set_bit (inserted[e], j);
2172 did_insert = 1;
2173 gcse_create_count++;
2174 }
2175 }
2176 }
2177 }
2178 }
2179
2180 sbitmap_vector_free (inserted);
2181 return did_insert;
2182 }
2183
2184 /* Copy the result of EXPR->EXPR generated by INSN to EXPR->REACHING_REG.
2185 Given "old_reg <- expr" (INSN), instead of adding after it
2186 reaching_reg <- old_reg
2187 it's better to do the following:
2188 reaching_reg <- expr
2189 old_reg <- reaching_reg
2190 because this way copy propagation can discover additional PRE
2191 opportunities. But if this fails, we try the old way.
2192 When "expr" is a store, i.e.
2193 given "MEM <- old_reg", instead of adding after it
2194 reaching_reg <- old_reg
2195 it's better to add it before as follows:
2196 reaching_reg <- old_reg
2197 MEM <- reaching_reg. */
2198
2199 static void
2200 pre_insert_copy_insn (struct gcse_expr *expr, rtx_insn *insn)
2201 {
2202 rtx reg = expr->reaching_reg;
2203 int regno = REGNO (reg);
2204 int indx = expr->bitmap_index;
2205 rtx pat = PATTERN (insn);
2206 rtx set, first_set;
2207 rtx_insn *new_insn;
2208 rtx old_reg;
2209 int i;
2210
2211 /* This block matches the logic in hash_scan_insn. */
2212 switch (GET_CODE (pat))
2213 {
2214 case SET:
2215 set = pat;
2216 break;
2217
2218 case PARALLEL:
2219 /* Search through the parallel looking for the set whose
2220 source was the expression that we're interested in. */
2221 first_set = NULL_RTX;
2222 set = NULL_RTX;
2223 for (i = 0; i < XVECLEN (pat, 0); i++)
2224 {
2225 rtx x = XVECEXP (pat, 0, i);
2226 if (GET_CODE (x) == SET)
2227 {
2228 /* If the source was a REG_EQUAL or REG_EQUIV note, we
2229 may not find an equivalent expression, but in this
2230 case the PARALLEL will have a single set. */
2231 if (first_set == NULL_RTX)
2232 first_set = x;
2233 if (expr_equiv_p (SET_SRC (x), expr->expr))
2234 {
2235 set = x;
2236 break;
2237 }
2238 }
2239 }
2240
2241 gcc_assert (first_set);
2242 if (set == NULL_RTX)
2243 set = first_set;
2244 break;
2245
2246 default:
2247 gcc_unreachable ();
2248 }
2249
2250 if (REG_P (SET_DEST (set)))
2251 {
2252 old_reg = SET_DEST (set);
2253 /* Check if we can modify the set destination in the original insn. */
2254 if (validate_change (insn, &SET_DEST (set), reg, 0))
2255 {
2256 new_insn = gen_move_insn (old_reg, reg);
2257 new_insn = emit_insn_after (new_insn, insn);
2258 }
2259 else
2260 {
2261 new_insn = gen_move_insn (reg, old_reg);
2262 new_insn = emit_insn_after (new_insn, insn);
2263 }
2264 }
2265 else /* This is possible only in case of a store to memory. */
2266 {
2267 old_reg = SET_SRC (set);
2268 new_insn = gen_move_insn (reg, old_reg);
2269
2270 /* Check if we can modify the set source in the original insn. */
2271 if (validate_change (insn, &SET_SRC (set), reg, 0))
2272 new_insn = emit_insn_before (new_insn, insn);
2273 else
2274 new_insn = emit_insn_after (new_insn, insn);
2275 }
2276
2277 gcse_create_count++;
2278
2279 if (dump_file)
2280 fprintf (dump_file,
2281 "PRE: bb %d, insn %d, copy expression %d in insn %d to reg %d\n",
2282 BLOCK_FOR_INSN (insn)->index, INSN_UID (new_insn), indx,
2283 INSN_UID (insn), regno);
2284 }
2285
2286 /* Copy available expressions that reach the redundant expression
2287 to `reaching_reg'. */
2288
2289 static void
2290 pre_insert_copies (void)
2291 {
2292 unsigned int i, added_copy;
2293 struct gcse_expr *expr;
2294 struct gcse_occr *occr;
2295 struct gcse_occr *avail;
2296
2297 /* For each available expression in the table, copy the result to
2298 `reaching_reg' if the expression reaches a deleted one.
2299
2300 ??? The current algorithm is rather brute force.
2301 Need to do some profiling. */
2302
2303 for (i = 0; i < expr_hash_table.size; i++)
2304 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2305 {
2306 /* If the basic block isn't reachable, PPOUT will be TRUE. However,
2307 we don't want to insert a copy here because the expression may not
2308 really be redundant. So only insert an insn if the expression was
2309 deleted. This test also avoids further processing if the
2310 expression wasn't deleted anywhere. */
2311 if (expr->reaching_reg == NULL)
2312 continue;
2313
2314 /* Set when we add a copy for that expression. */
2315 added_copy = 0;
2316
2317 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2318 {
2319 if (! occr->deleted_p)
2320 continue;
2321
2322 for (avail = expr->avail_occr; avail != NULL; avail = avail->next)
2323 {
2324 rtx_insn *insn = avail->insn;
2325
2326 /* No need to handle this one if handled already. */
2327 if (avail->copied_p)
2328 continue;
2329
2330 /* Don't handle this one if it's a redundant one. */
2331 if (insn->deleted ())
2332 continue;
2333
2334 /* Or if the expression doesn't reach the deleted one. */
2335 if (! pre_expr_reaches_here_p (BLOCK_FOR_INSN (avail->insn),
2336 expr,
2337 BLOCK_FOR_INSN (occr->insn)))
2338 continue;
2339
2340 added_copy = 1;
2341
2342 /* Copy the result of avail to reaching_reg. */
2343 pre_insert_copy_insn (expr, insn);
2344 avail->copied_p = 1;
2345 }
2346 }
2347
2348 if (added_copy)
2349 update_ld_motion_stores (expr);
2350 }
2351 }
2352
2353 struct set_data
2354 {
2355 rtx_insn *insn;
2356 const_rtx set;
2357 int nsets;
2358 };
2359
2360 /* Increment number of sets and record set in DATA. */
2361
2362 static void
2363 record_set_data (rtx dest, const_rtx set, void *data)
2364 {
2365 struct set_data *s = (struct set_data *)data;
2366
2367 if (GET_CODE (set) == SET)
2368 {
2369       /* We allow insns having multiple sets, where all but one are
2370 	 dead, to be treated as single-set insns.  In the common case only
2371 	 a single set is present, so we want to avoid checking for REG_UNUSED
2372 	 notes unless necessary.  */
2373 if (s->nsets == 1
2374 && find_reg_note (s->insn, REG_UNUSED, SET_DEST (s->set))
2375 && !side_effects_p (s->set))
2376 s->nsets = 0;
2377
2378 if (!s->nsets)
2379 {
2380 /* Record this set. */
2381 s->nsets += 1;
2382 s->set = set;
2383 }
2384 else if (!find_reg_note (s->insn, REG_UNUSED, dest)
2385 || side_effects_p (set))
2386 s->nsets += 1;
2387 }
2388 }
2389
2390 static const_rtx
2391 single_set_gcse (rtx_insn *insn)
2392 {
2393 struct set_data s;
2394 rtx pattern;
2395
2396 gcc_assert (INSN_P (insn));
2397
2398 /* Optimize common case. */
2399 pattern = PATTERN (insn);
2400 if (GET_CODE (pattern) == SET)
2401 return pattern;
2402
2403 s.insn = insn;
2404 s.nsets = 0;
2405 note_pattern_stores (pattern, record_set_data, &s);
2406
2407   /* Insns considered invariant have exactly one set.  */
2408 gcc_assert (s.nsets == 1);
2409 return s.set;
2410 }
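
/* An illustration (a sketch): a two-set PARALLEL such as

     (parallel [(set (reg 100) (plus (reg 101) (reg 102)))
		(set (reg flags) (...))])

   where the flags set carries a REG_UNUSED note and has no side effects
   still counts as having exactly one set, and the live set of (reg 100)
   is what gets returned.  */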
2411
2412 /* Emit a move from SRC to DEST, noting the equivalence with the
2413    expression computed in INSN.  */
2414
2415 static rtx_insn *
2416 gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
2417 {
2418 rtx_insn *new_rtx;
2419 const_rtx set = single_set_gcse (insn);
2420 rtx set2;
2421 rtx note;
2422 rtx eqv = NULL_RTX;
2423
2424 /* This should never fail since we're creating a reg->reg copy
2425 we've verified to be valid. */
2426
2427 new_rtx = emit_insn_after (gen_move_insn (dest, src), insn);
2428
2429 /* Note the equivalence for local CSE pass. Take the note from the old
2430 set if there was one. Otherwise record the SET_SRC from the old set
2431 unless DEST is also an operand of the SET_SRC. */
2432 set2 = single_set (new_rtx);
2433 if (!set2 || !rtx_equal_p (SET_DEST (set2), dest))
2434 return new_rtx;
2435 if ((note = find_reg_equal_equiv_note (insn)))
2436 eqv = XEXP (note, 0);
2437 else if (! REG_P (dest)
2438 || ! reg_mentioned_p (dest, SET_SRC (set)))
2439 eqv = SET_SRC (set);
2440
2441 if (eqv != NULL_RTX)
2442 set_unique_reg_note (new_rtx, REG_EQUAL, copy_insn_1 (eqv));
2443
2444 return new_rtx;
2445 }
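
/* An illustration (a sketch): given INSN

     (set (reg 105) (plus (reg 103) (reg 104)))

   calling this with DEST = (reg 105) and SRC = (reg 200) emits

     (set (reg 105) (reg 200))

   after INSN and attaches a REG_EQUAL note of (plus (reg 103) (reg 104))
   to the copy, so a later CSE pass still sees the equivalence.  */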
2446
2447 /* Delete redundant computations.
2448 Deletion is done by changing the insn to copy the `reaching_reg' of
2449 the expression into the result of the SET. It is left to later passes
2450 to propagate the copy or eliminate it.
2451
2452 Return nonzero if a change is made. */
2453
2454 static int
2455 pre_delete (void)
2456 {
2457 unsigned int i;
2458 int changed;
2459 struct gcse_expr *expr;
2460 struct gcse_occr *occr;
2461
2462 changed = 0;
2463 for (i = 0; i < expr_hash_table.size; i++)
2464 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2465 {
2466 int indx = expr->bitmap_index;
2467
2468 /* We only need to search antic_occr since we require ANTLOC != 0. */
2469 for (occr = expr->antic_occr; occr != NULL; occr = occr->next)
2470 {
2471 rtx_insn *insn = occr->insn;
2472 rtx set;
2473 basic_block bb = BLOCK_FOR_INSN (insn);
2474
2475 /* We only delete insns that have a single_set. */
2476 if (bitmap_bit_p (pre_delete_map[bb->index], indx)
2477 && (set = single_set (insn)) != 0
2478 && dbg_cnt (pre_insn))
2479 {
2480 /* Create a pseudo-reg to store the result of reaching
2481 expressions into. Get the mode for the new pseudo from
2482 the mode of the original destination pseudo. */
2483 if (expr->reaching_reg == NULL)
2484 expr->reaching_reg = gen_reg_rtx_and_attrs (SET_DEST (set));
2485
2486 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg, insn);
2487 delete_insn (insn);
2488 occr->deleted_p = 1;
2489 changed = 1;
2490 gcse_subst_count++;
2491
2492 if (dump_file)
2493 {
2494 fprintf (dump_file,
2495 "PRE: redundant insn %d (expression %d) in ",
2496 INSN_UID (insn), indx);
2497 fprintf (dump_file, "bb %d, reaching reg is %d\n",
2498 bb->index, REGNO (expr->reaching_reg));
2499 }
2500 }
2501 }
2502 }
2503
2504 return changed;
2505 }
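
/* A sketch of pre_delete's net effect on one redundant occurrence:

     r5 = r3 + r4	; bit set in pre_delete_map for this block

   becomes

     r5 = r200		; r200 is expr->reaching_reg

   with the original insn deleted; copy propagation or CSE is expected
   to clean up the copy later.  */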
2506
2507 /* Perform GCSE optimizations using PRE.
2508 This is called by one_pre_gcse_pass after all the dataflow analysis
2509 has been done.
2510
2511    This is based on the original Morel-Renvoise paper, Fred Chow's thesis, and
2512 lazy code motion from Knoop, Ruthing and Steffen as described in Advanced
2513 Compiler Design and Implementation.
2514
2515 ??? A new pseudo reg is created to hold the reaching expression. The nice
2516 thing about the classical approach is that it would try to use an existing
2517 reg. If the register can't be adequately optimized [i.e. we introduce
2518 reload problems], one could add a pass here to propagate the new register
2519 through the block.
2520
2521 ??? We don't handle single sets in PARALLELs because we're [currently] not
2522 able to copy the rest of the parallel when we insert copies to create full
2523 redundancies from partial redundancies. However, there's no reason why we
2524 can't handle PARALLELs in the cases where there are no partial
2525 redundancies. */
2526
2527 static int
2528 pre_gcse (struct edge_list *edge_list)
2529 {
2530 unsigned int i;
2531 int did_insert, changed;
2532 struct gcse_expr **index_map;
2533 struct gcse_expr *expr;
2534
2535 /* Compute a mapping from expression number (`bitmap_index') to
2536 hash table entry. */
2537
2538 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
2539 for (i = 0; i < expr_hash_table.size; i++)
2540 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
2541 index_map[expr->bitmap_index] = expr;
2542
2543 /* Delete the redundant insns first so that
2544 - we know what register to use for the new insns and for the other
2545 ones with reaching expressions
2546 - we know which insns are redundant when we go to create copies */
2547
2548 changed = pre_delete ();
2549 did_insert = pre_edge_insert (edge_list, index_map);
2550
2551 /* In other places with reaching expressions, copy the expression to the
2552 specially allocated pseudo-reg that reaches the redundant expr. */
2553 pre_insert_copies ();
2554 if (did_insert)
2555 {
2556 commit_edge_insertions ();
2557 changed = 1;
2558 }
2559
2560 free (index_map);
2561 return changed;
2562 }
2563
2564 /* Top level routine to perform one PRE GCSE pass.
2565
2566 Return nonzero if a change was made. */
2567
2568 static int
2569 one_pre_gcse_pass (void)
2570 {
2571 int changed = 0;
2572
2573 gcse_subst_count = 0;
2574 gcse_create_count = 0;
2575
2576 /* Return if there's nothing to do, or it is too expensive. */
2577 if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
2578 || gcse_or_cprop_is_too_expensive (_("PRE disabled")))
2579 return 0;
2580
2581 /* We need alias. */
2582 init_alias_analysis ();
2583
2584 bytes_used = 0;
2585 gcc_obstack_init (&gcse_obstack);
2586 alloc_gcse_mem ();
2587
2588 alloc_hash_table (&expr_hash_table);
2589 add_noreturn_fake_exit_edges ();
2590 if (flag_gcse_lm)
2591 compute_ld_motion_mems ();
2592
2593 compute_hash_table (&expr_hash_table);
2594 if (flag_gcse_lm)
2595 trim_ld_motion_mems ();
2596 if (dump_file)
2597 dump_hash_table (dump_file, "Expression", &expr_hash_table);
2598
2599 if (expr_hash_table.n_elems > 0)
2600 {
2601 struct edge_list *edge_list;
2602 alloc_pre_mem (last_basic_block_for_fn (cfun), expr_hash_table.n_elems);
2603 edge_list = compute_pre_data ();
2604 changed |= pre_gcse (edge_list);
2605 free_edge_list (edge_list);
2606 free_pre_mem ();
2607 }
2608
2609 if (flag_gcse_lm)
2610 free_ld_motion_mems ();
2611 remove_fake_exit_edges ();
2612 free_hash_table (&expr_hash_table);
2613
2614 free_gcse_mem ();
2615 obstack_free (&gcse_obstack, NULL);
2616
2617 /* We are finished with alias. */
2618 end_alias_analysis ();
2619
2620 if (dump_file)
2621 {
2622 fprintf (dump_file, "PRE GCSE of %s, %d basic blocks, %d bytes needed, ",
2623 current_function_name (), n_basic_blocks_for_fn (cfun),
2624 bytes_used);
2625 fprintf (dump_file, "%d substs, %d insns created\n",
2626 gcse_subst_count, gcse_create_count);
2627 }
2628
2629 return changed;
2630 }
2631
2632 /* If X contains any LABEL_REF's, add REG_LABEL_OPERAND notes for them
2633 to INSN. If such notes are added to an insn which references a
2634 CODE_LABEL, the LABEL_NUSES count is incremented. We have to add
2635 that note, because the following loop optimization pass requires
2636 them. */
2637
2638 /* ??? If there was a jump optimization pass after gcse and before loop,
2639 then we would not need to do this here, because jump would add the
2640 necessary REG_LABEL_OPERAND and REG_LABEL_TARGET notes. */
2641
2642 static void
2643 add_label_notes (rtx x, rtx_insn *insn)
2644 {
2645 enum rtx_code code = GET_CODE (x);
2646 int i, j;
2647 const char *fmt;
2648
2649 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
2650 {
2651 /* This code used to ignore labels that referred to dispatch tables to
2652 avoid flow generating (slightly) worse code.
2653
2654 We no longer ignore such label references (see LABEL_REF handling in
2655 mark_jump_label for additional information). */
2656
2657 /* There's no reason for current users to emit jump-insns with
2658 such a LABEL_REF, so we don't have to handle REG_LABEL_TARGET
2659 notes. */
2660 gcc_assert (!JUMP_P (insn));
2661 add_reg_note (insn, REG_LABEL_OPERAND, label_ref_label (x));
2662
2663 if (LABEL_P (label_ref_label (x)))
2664 LABEL_NUSES (label_ref_label (x))++;
2665
2666 return;
2667 }
2668
2669 for (i = GET_RTX_LENGTH (code) - 1, fmt = GET_RTX_FORMAT (code); i >= 0; i--)
2670 {
2671 if (fmt[i] == 'e')
2672 add_label_notes (XEXP (x, i), insn);
2673 else if (fmt[i] == 'E')
2674 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2675 add_label_notes (XVECEXP (x, i, j), insn);
2676 }
2677 }
2678
2679 /* Code Hoisting variables and subroutines. */
2680
2681 /* Very busy expressions. */
2682 static sbitmap *hoist_vbein;
2683 static sbitmap *hoist_vbeout;
2684
2685 /* ??? We could compute post dominators and run this algorithm in
2686    reverse to perform tail merging; doing so would probably be
2687    more effective than the tail merging code in jump.cc.
2688
2689 It's unclear if tail merging could be run in parallel with
2690 code hoisting. It would be nice. */
2691
2692 /* Allocate vars used for code hoisting analysis. */
2693
2694 static void
2695 alloc_code_hoist_mem (int n_blocks, int n_exprs)
2696 {
2697 antloc = sbitmap_vector_alloc (n_blocks, n_exprs);
2698 transp = sbitmap_vector_alloc (n_blocks, n_exprs);
2699 comp = sbitmap_vector_alloc (n_blocks, n_exprs);
2700
2701 hoist_vbein = sbitmap_vector_alloc (n_blocks, n_exprs);
2702 hoist_vbeout = sbitmap_vector_alloc (n_blocks, n_exprs);
2703 }
2704
2705 /* Free vars used for code hoisting analysis. */
2706
2707 static void
2708 free_code_hoist_mem (void)
2709 {
2710 sbitmap_vector_free (antloc);
2711 sbitmap_vector_free (transp);
2712 sbitmap_vector_free (comp);
2713
2714 sbitmap_vector_free (hoist_vbein);
2715 sbitmap_vector_free (hoist_vbeout);
2716
2717 free_dominance_info (CDI_DOMINATORS);
2718 }
2719
2720 /* Compute the very busy expressions at entry/exit from each block.
2721
2722 An expression is very busy if all paths from a given point
2723 compute the expression. */
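
/* A sketch of the fixed point the loop below computes, in dataflow terms:

     VBEout(bb) = (intersection over successors S of VBEin(S)) | COMP(bb)
     VBEin(bb)  = ANTLOC(bb) | (VBEout(bb) & TRANSP(bb))

   iterated over the blocks in reverse order until nothing changes (the
   block falling through to EXIT keeps an empty VBEout).  */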
2724
2725 static void
2726 compute_code_hoist_vbeinout (void)
2727 {
2728 int changed, passes;
2729 basic_block bb;
2730
2731 bitmap_vector_clear (hoist_vbeout, last_basic_block_for_fn (cfun));
2732 bitmap_vector_clear (hoist_vbein, last_basic_block_for_fn (cfun));
2733
2734 passes = 0;
2735 changed = 1;
2736
2737 while (changed)
2738 {
2739 changed = 0;
2740
2741       /* We scan the blocks in reverse order to speed up
2742 	 convergence.  */
2743 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2744 {
2745 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
2746 {
2747 bitmap_intersection_of_succs (hoist_vbeout[bb->index],
2748 hoist_vbein, bb);
2749
2750 /* Include expressions in VBEout that are calculated
2751 in BB and available at its end. */
2752 bitmap_ior (hoist_vbeout[bb->index],
2753 hoist_vbeout[bb->index], comp[bb->index]);
2754 }
2755
2756 changed |= bitmap_or_and (hoist_vbein[bb->index],
2757 antloc[bb->index],
2758 hoist_vbeout[bb->index],
2759 transp[bb->index]);
2760 }
2761
2762 passes++;
2763 }
2764
2765 if (dump_file)
2766 {
2767 fprintf (dump_file, "hoisting vbeinout computation: %d passes\n", passes);
2768
2769 FOR_EACH_BB_FN (bb, cfun)
2770 {
2771 fprintf (dump_file, "vbein (%d): ", bb->index);
2772 dump_bitmap_file (dump_file, hoist_vbein[bb->index]);
2773 fprintf (dump_file, "vbeout(%d): ", bb->index);
2774 dump_bitmap_file (dump_file, hoist_vbeout[bb->index]);
2775 }
2776 }
2777 }
2778
2779 /* Top level routine to do the dataflow analysis needed by code hoisting. */
2780
2781 static void
2782 compute_code_hoist_data (void)
2783 {
2784 compute_local_properties (transp, comp, antloc, &expr_hash_table);
2785 prune_expressions (false);
2786 compute_code_hoist_vbeinout ();
2787 calculate_dominance_info (CDI_DOMINATORS);
2788 if (dump_file)
2789 fprintf (dump_file, "\n");
2790 }
2791
2792 /* Update register pressure for BB when hoisting an expression from
2793 instruction FROM, if live ranges of inputs are shrunk. Also
2794    maintain live_in information if the live range of a register
2795    referred to in FROM is shrunk.
2796
2797 Return 0 if register pressure doesn't change, otherwise return
2798 the number by which register pressure is decreased.
2799
2800 NOTE: Register pressure won't be increased in this function. */
2801
2802 static int
2803 update_bb_reg_pressure (basic_block bb, rtx_insn *from)
2804 {
2805 rtx dreg;
2806 rtx_insn *insn;
2807 basic_block succ_bb;
2808 df_ref use, op_ref;
2809 edge succ;
2810 edge_iterator ei;
2811 int decreased_pressure = 0;
2812 int nregs;
2813 enum reg_class pressure_class;
2814
2815 FOR_EACH_INSN_USE (use, from)
2816 {
2817 dreg = DF_REF_REAL_REG (use);
2818       /* The live range of the register is shrunk only if it isn't:
2819 	 1. referred to on any path from the end of this block to EXIT, or
2820 	 2. referred to by insns other than FROM in this block.  */
2821 FOR_EACH_EDGE (succ, ei, bb->succs)
2822 {
2823 succ_bb = succ->dest;
2824 if (succ_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
2825 continue;
2826
2827 if (bitmap_bit_p (BB_DATA (succ_bb)->live_in, REGNO (dreg)))
2828 break;
2829 }
2830 if (succ != NULL)
2831 continue;
2832
2833 op_ref = DF_REG_USE_CHAIN (REGNO (dreg));
2834 for (; op_ref; op_ref = DF_REF_NEXT_REG (op_ref))
2835 {
2836 if (!DF_REF_INSN_INFO (op_ref))
2837 continue;
2838
2839 insn = DF_REF_INSN (op_ref);
2840 if (BLOCK_FOR_INSN (insn) == bb
2841 && NONDEBUG_INSN_P (insn) && insn != from)
2842 break;
2843 }
2844
2845 pressure_class = get_regno_pressure_class (REGNO (dreg), &nregs);
2846 /* Decrease register pressure and update live_in information for
2847 this block. */
2848 if (!op_ref && pressure_class != NO_REGS)
2849 {
2850 decreased_pressure += nregs;
2851 BB_DATA (bb)->max_reg_pressure[pressure_class] -= nregs;
2852 bitmap_clear_bit (BB_DATA (bb)->live_in, REGNO (dreg));
2853 }
2854 }
2855 return decreased_pressure;
2856 }
2857
2858 /* Determine if the expression EXPR should be hoisted to EXPR_BB up in
2859    flow graph, if it can reach BB unimpaired.  Stop the search if the
2860 expression would need to be moved more than DISTANCE instructions.
2861
2862 DISTANCE is the number of instructions through which EXPR can be
2863 hoisted up in flow graph.
2864
2865 BB_SIZE points to an array which contains the number of instructions
2866 for each basic block.
2867
2868 PRESSURE_CLASS and NREGS are register class and number of hard registers
2869 for storing EXPR.
2870
2871 HOISTED_BBS points to a bitmap indicating basic blocks through which
2872 EXPR is hoisted.
2873
2874 FROM is the instruction from which EXPR is hoisted.
2875
2876    It's unclear exactly what Muchnick meant by "unimpaired".  It seems
2877 to me that the expression must either be computed or transparent in
2878 *every* block in the path(s) from EXPR_BB to BB. Any other definition
2879 would allow the expression to be hoisted out of loops, even if
2880 the expression wasn't a loop invariant.
2881
2882 Contrast this to reachability for PRE where an expression is
2883 considered reachable if *any* path reaches instead of *all*
2884 paths. */
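
/* For illustration (a sketch): in the diamond

	  b1
	 /  \
       b2    b3
	 \  /
	  b4

   an occurrence in b2 can reach b4 for PRE purposes along the single
   path b1->b2->b4, but hoisting an expression from b4 up to b1 requires
   it to be computed or transparent in *both* b2 and b3.  */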
2885
2886 static int
2887 should_hoist_expr_to_dom (basic_block expr_bb, struct gcse_expr *expr,
2888 basic_block bb, sbitmap visited,
2889 HOST_WIDE_INT distance,
2890 int *bb_size, enum reg_class pressure_class,
2891 int *nregs, bitmap hoisted_bbs, rtx_insn *from)
2892 {
2893 unsigned int i;
2894 edge pred;
2895 edge_iterator ei;
2896 sbitmap_iterator sbi;
2897 int visited_allocated_locally = 0;
2898 int decreased_pressure = 0;
2899
2900 if (flag_ira_hoist_pressure)
2901 {
2902       /* Record the old information of basic block BB when it is visited
2903 	 for the first time.  */
2904 if (!bitmap_bit_p (hoisted_bbs, bb->index))
2905 {
2906 struct bb_data *data = BB_DATA (bb);
2907 bitmap_copy (data->backup, data->live_in);
2908 data->old_pressure = data->max_reg_pressure[pressure_class];
2909 }
2910 decreased_pressure = update_bb_reg_pressure (bb, from);
2911 }
2912   /* Terminate the search if the distance for which EXPR is allowed to
2913      move is exhausted.  */
2914 if (distance > 0)
2915 {
2916 if (flag_ira_hoist_pressure)
2917 {
2918 /* Prefer to hoist EXPR if register pressure is decreased. */
2919 if (decreased_pressure > *nregs)
2920 distance += bb_size[bb->index];
2921 /* Let EXPR be hoisted through basic block at no cost if one
2922 of following conditions is satisfied:
2923
2924 1. The basic block has low register pressure.
2925 	     2. Register pressure won't be increased after hoisting EXPR.
2926
2927 	     Constant expressions are handled conservatively, because
2928 	     hoisting constant expressions aggressively results in worse
2929 	     code.  This decision is based on observations of CSiBE
2930 	     on the ARM target, while it has no obvious effect on other
2931 	     targets like x86, x86_64, mips and powerpc.  */
2932 else if (CONST_INT_P (expr->expr)
2933 || (BB_DATA (bb)->max_reg_pressure[pressure_class]
2934 >= ira_class_hard_regs_num[pressure_class]
2935 && decreased_pressure < *nregs))
2936 distance -= bb_size[bb->index];
2937 }
2938 else
2939 distance -= bb_size[bb->index];
2940
2941 if (distance <= 0)
2942 return 0;
2943 }
2944 else
2945 gcc_assert (distance == 0);
2946
2947 if (visited == NULL)
2948 {
2949 visited_allocated_locally = 1;
2950 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
2951 bitmap_clear (visited);
2952 }
2953
2954 FOR_EACH_EDGE (pred, ei, bb->preds)
2955 {
2956 basic_block pred_bb = pred->src;
2957
2958 if (pred->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2959 break;
2960 else if (pred_bb == expr_bb)
2961 continue;
2962 else if (bitmap_bit_p (visited, pred_bb->index))
2963 continue;
2964 else if (! bitmap_bit_p (transp[pred_bb->index], expr->bitmap_index))
2965 break;
2966 /* Not killed. */
2967 else
2968 {
2969 bitmap_set_bit (visited, pred_bb->index);
2970 if (! should_hoist_expr_to_dom (expr_bb, expr, pred_bb,
2971 visited, distance, bb_size,
2972 pressure_class, nregs,
2973 hoisted_bbs, from))
2974 break;
2975 }
2976 }
2977 if (visited_allocated_locally)
2978 {
2979 /* If EXPR can be hoisted to expr_bb, record basic blocks through
2980 which EXPR is hoisted in hoisted_bbs. */
2981 if (flag_ira_hoist_pressure && !pred)
2982 {
2983 /* Record the basic block from which EXPR is hoisted. */
2984 bitmap_set_bit (visited, bb->index);
2985 EXECUTE_IF_SET_IN_BITMAP (visited, 0, i, sbi)
2986 bitmap_set_bit (hoisted_bbs, i);
2987 }
2988 sbitmap_free (visited);
2989 }
2990
2991 return (pred == NULL);
2992 }
2993
2994 /* Find occurrence in BB. */
2995
2996 static struct gcse_occr *
2997 find_occr_in_bb (struct gcse_occr *occr, basic_block bb)
2998 {
2999 /* Find the right occurrence of this expression. */
3000 while (occr && BLOCK_FOR_INSN (occr->insn) != bb)
3001 occr = occr->next;
3002
3003 return occr;
3004 }
3005
3006 /* Actually perform code hoisting.
3007
3008 The code hoisting pass can hoist multiple computations of the same
3009    expression along dominated paths to a dominating basic block, like
3010 from b2/b3 to b1 as depicted below:
3011
3012 b1 ------
3013 /\ |
3014 / \ |
3015 bx by distance
3016 / \ |
3017 / \ |
3018 b2 b3 ------
3019
3020 Unfortunately code hoisting generally extends the live range of an
3021 output pseudo register, which increases register pressure and hurts
3022 register allocation. To address this issue, an attribute MAX_DISTANCE
3023 is computed and attached to each expression. The attribute is computed
3024    from the rtx cost of the corresponding expression, and it is used to
3025    control how far the expression can be hoisted up the flow graph.  As the
3026    expression is hoisted up the flow graph, GCC decreases its DISTANCE
3027    and stops the hoist if DISTANCE reaches 0.  Code hoisting can decrease
3028 register pressure if live ranges of inputs are shrunk.
3029
3030    Option "-fira-hoist-pressure" implements register-pressure-directed
3031    hoisting based on the method above.  The rationale is:
3032 1. Calculate register pressure for each basic block by reusing IRA
3033 facility.
3034    2. When an expression is hoisted through one basic block, GCC checks
3035    the change of live ranges for inputs/output.  The basic block's
3036    register pressure will be increased because of the extended live
3037    range of the output.  However, register pressure will be decreased
3038    if the live ranges of inputs are shrunk.
3039    3. After knowing how hoisting affects register pressure, GCC prefers
3040    to hoist the expression if it can decrease register pressure, by
3041    increasing DISTANCE of the corresponding expression.
3042    4. If hoisting the expression increases register pressure, GCC checks
3043    the register pressure of the basic block and decreases DISTANCE only if
3044    the register pressure is high.  In other words, the expression will be
3045    hoisted through at no cost if the basic block has low register
3046    pressure.
3047 5. Update register pressure information for basic blocks through
3048 which expression is hoisted. */
3049
3050 static int
3051 hoist_code (void)
3052 {
3053 basic_block bb, dominated;
3054 unsigned int dom_tree_walk_index;
3055 unsigned int i, j, k;
3056 struct gcse_expr **index_map;
3057 struct gcse_expr *expr;
3058 int *to_bb_head;
3059 int *bb_size;
3060 int changed = 0;
3061 struct bb_data *data;
3062 /* Basic blocks that have occurrences reachable from BB. */
3063 bitmap from_bbs;
3064 /* Basic blocks through which expr is hoisted. */
3065 bitmap hoisted_bbs = NULL;
3066 bitmap_iterator bi;
3067
3068 /* Compute a mapping from expression number (`bitmap_index') to
3069 hash table entry. */
3070
3071 index_map = XCNEWVEC (struct gcse_expr *, expr_hash_table.n_elems);
3072 for (i = 0; i < expr_hash_table.size; i++)
3073 for (expr = expr_hash_table.table[i]; expr; expr = expr->next_same_hash)
3074 index_map[expr->bitmap_index] = expr;
3075
3076 /* Calculate sizes of basic blocks and note how far
3077 each instruction is from the start of its block. We then use this
3078      data to restrict the distance an expression can travel.  */
3079
3080 to_bb_head = XCNEWVEC (int, get_max_uid ());
3081 bb_size = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3082
3083 FOR_EACH_BB_FN (bb, cfun)
3084 {
3085 rtx_insn *insn;
3086 int to_head;
3087
3088 to_head = 0;
3089 FOR_BB_INSNS (bb, insn)
3090 {
3091 	  /* Don't count debug instructions, so that they don't affect
3092 	     hoisting decisions.  */
3093 if (NONDEBUG_INSN_P (insn))
3094 to_bb_head[INSN_UID (insn)] = to_head++;
3095 }
3096
3097 bb_size[bb->index] = to_head;
3098 }
3099
3100 gcc_assert (EDGE_COUNT (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) == 1
3101 && (EDGE_SUCC (ENTRY_BLOCK_PTR_FOR_FN (cfun), 0)->dest
3102 == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb));
3103
3104 from_bbs = BITMAP_ALLOC (NULL);
3105 if (flag_ira_hoist_pressure)
3106 hoisted_bbs = BITMAP_ALLOC (NULL);
3107
3108 auto_vec<basic_block> dom_tree_walk
3109 = get_all_dominated_blocks (CDI_DOMINATORS,
3110 ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb);
3111
3112 /* Walk over each basic block looking for potentially hoistable
3113      expressions; nothing gets hoisted from the entry block.  */
3114 FOR_EACH_VEC_ELT (dom_tree_walk, dom_tree_walk_index, bb)
3115 {
3116 auto_vec<basic_block> domby
3117 = get_dominated_to_depth (CDI_DOMINATORS, bb, param_max_hoist_depth);
3118
3119 if (domby.length () == 0)
3120 continue;
3121
3122 /* Examine each expression that is very busy at the exit of this
3123 block. These are the potentially hoistable expressions. */
3124 for (i = 0; i < SBITMAP_SIZE (hoist_vbeout[bb->index]); i++)
3125 {
3126 if (bitmap_bit_p (hoist_vbeout[bb->index], i))
3127 {
3128 int nregs = 0;
3129 enum reg_class pressure_class = NO_REGS;
3130 /* Current expression. */
3131 struct gcse_expr *expr = index_map[i];
3132 /* Number of occurrences of EXPR that can be hoisted to BB. */
3133 int hoistable = 0;
3134 /* Occurrences reachable from BB. */
3135 vec<occr_t> occrs_to_hoist = vNULL;
3136 /* We want to insert the expression into BB only once, so
3137 note when we've inserted it. */
3138 int insn_inserted_p;
3139 occr_t occr;
3140
3141 /* If an expression is computed in BB and is available at end of
3142 BB, hoist all occurrences dominated by BB to BB. */
3143 if (bitmap_bit_p (comp[bb->index], i))
3144 {
3145 occr = find_occr_in_bb (expr->antic_occr, bb);
3146
3147 if (occr)
3148 {
3149 		      /* An occurrence might already have been deleted
3150 while processing a dominator of BB. */
3151 if (!occr->deleted_p)
3152 {
3153 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3154 hoistable++;
3155 }
3156 }
3157 else
3158 hoistable++;
3159 }
3160
3161 /* We've found a potentially hoistable expression, now
3162 we look at every block BB dominates to see if it
3163 computes the expression. */
3164 FOR_EACH_VEC_ELT (domby, j, dominated)
3165 {
3166 HOST_WIDE_INT max_distance;
3167
3168 /* Ignore self dominance. */
3169 if (bb == dominated)
3170 continue;
3171 /* We've found a dominated block, now see if it computes
3172 the busy expression and whether or not moving that
3173 expression to the "beginning" of that block is safe. */
3174 if (!bitmap_bit_p (antloc[dominated->index], i))
3175 continue;
3176
3177 occr = find_occr_in_bb (expr->antic_occr, dominated);
3178 gcc_assert (occr);
3179
3180 		  /* An occurrence might already have been deleted
3181 while processing a dominator of BB. */
3182 if (occr->deleted_p)
3183 continue;
3184 gcc_assert (NONDEBUG_INSN_P (occr->insn));
3185
3186 max_distance = expr->max_distance;
3187 if (max_distance > 0)
3188 /* Adjust MAX_DISTANCE to account for the fact that
3189 OCCR won't have to travel all of DOMINATED, but
3190 only part of it. */
3191 max_distance += (bb_size[dominated->index]
3192 - to_bb_head[INSN_UID (occr->insn)]);
3193
3194 pressure_class = get_pressure_class_and_nregs (occr->insn,
3195 &nregs);
3196
3197 /* Note if the expression should be hoisted from the dominated
3198 		     block to BB if it can reach DOMINATED unimpaired.
3199
3200 Keep track of how many times this expression is hoistable
3201 from a dominated block into BB. */
3202 if (should_hoist_expr_to_dom (bb, expr, dominated, NULL,
3203 max_distance, bb_size,
3204 pressure_class, &nregs,
3205 hoisted_bbs, occr->insn))
3206 {
3207 hoistable++;
3208 occrs_to_hoist.safe_push (occr);
3209 bitmap_set_bit (from_bbs, dominated->index);
3210 }
3211 }
3212
3213 /* If we found more than one hoistable occurrence of this
3214 expression, then note it in the vector of expressions to
3215 hoist. It makes no sense to hoist things which are computed
3216 in only one BB, and doing so tends to pessimize register
3217 allocation. One could increase this value to try harder
3218 to avoid any possible code expansion due to register
3219 allocation issues; however experiments have shown that
3220 the vast majority of hoistable expressions are only movable
3221 from two successors, so raising this threshold is likely
3222 to nullify any benefit we get from code hoisting. */
3223 if (hoistable > 1 && dbg_cnt (hoist_insn))
3224 {
3225 /* If (hoistable != vec::length), then there is
3226 an occurrence of EXPR in BB itself. Don't waste
3227 time looking for LCA in this case. */
3228 if ((unsigned) hoistable == occrs_to_hoist.length ())
3229 {
3230 basic_block lca;
3231
3232 lca = nearest_common_dominator_for_set (CDI_DOMINATORS,
3233 from_bbs);
3234 if (lca != bb)
3235 /* Punt, it's better to hoist these occurrences to
3236 LCA. */
3237 occrs_to_hoist.release ();
3238 }
3239 }
3240 else
3241 /* Punt, no point hoisting a single occurrence. */
3242 occrs_to_hoist.release ();
3243
3244 if (flag_ira_hoist_pressure
3245 && !occrs_to_hoist.is_empty ())
3246 {
3247 /* Increase register pressure of basic blocks to which
3248 expr is hoisted because of extended live range of
3249 output. */
3250 data = BB_DATA (bb);
3251 data->max_reg_pressure[pressure_class] += nregs;
3252 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3253 {
3254 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3255 data->max_reg_pressure[pressure_class] += nregs;
3256 }
3257 }
3258 else if (flag_ira_hoist_pressure)
3259 {
3260 /* Restore register pressure and live_in info for basic
3261 blocks recorded in hoisted_bbs when expr will not be
3262 hoisted. */
3263 EXECUTE_IF_SET_IN_BITMAP (hoisted_bbs, 0, k, bi)
3264 {
3265 data = BB_DATA (BASIC_BLOCK_FOR_FN (cfun, k));
3266 bitmap_copy (data->live_in, data->backup);
3267 data->max_reg_pressure[pressure_class]
3268 = data->old_pressure;
3269 }
3270 }
3271
3272 if (flag_ira_hoist_pressure)
3273 bitmap_clear (hoisted_bbs);
3274
3275 insn_inserted_p = 0;
3276
3277 /* Walk through occurrences of I'th expressions we want
3278 to hoist to BB and make the transformations. */
3279 FOR_EACH_VEC_ELT (occrs_to_hoist, j, occr)
3280 {
3281 rtx_insn *insn;
3282 const_rtx set;
3283
3284 gcc_assert (!occr->deleted_p);
3285
3286 insn = occr->insn;
3287 set = single_set_gcse (insn);
3288
3289 /* Create a pseudo-reg to store the result of reaching
3290 expressions into. Get the mode for the new pseudo
3291 from the mode of the original destination pseudo.
3292
3293 It is important to use new pseudos whenever we
3294 emit a set. This will allow reload to use
3295 rematerialization for such registers. */
3296 if (!insn_inserted_p)
3297 expr->reaching_reg
3298 = gen_reg_rtx_and_attrs (SET_DEST (set));
3299
3300 gcse_emit_move_after (SET_DEST (set), expr->reaching_reg,
3301 insn);
3302 delete_insn (insn);
3303 occr->deleted_p = 1;
3304 changed = 1;
3305 gcse_subst_count++;
3306
3307 if (!insn_inserted_p)
3308 {
3309 insert_insn_end_basic_block (expr, bb);
3310 insn_inserted_p = 1;
3311 }
3312 }
3313
3314 occrs_to_hoist.release ();
3315 bitmap_clear (from_bbs);
3316 }
3317 }
3318 }
3319
3320 BITMAP_FREE (from_bbs);
3321 if (flag_ira_hoist_pressure)
3322 BITMAP_FREE (hoisted_bbs);
3323
3324 free (bb_size);
3325 free (to_bb_head);
3326 free (index_map);
3327
3328 return changed;
3329 }
3330
3331 /* Return pressure class and number of needed hard registers (through
3332 *NREGS) of register REGNO. */
3333 static enum reg_class
3334 get_regno_pressure_class (int regno, int *nregs)
3335 {
3336 if (regno >= FIRST_PSEUDO_REGISTER)
3337 {
3338 enum reg_class pressure_class;
3339
3340 pressure_class = reg_allocno_class (regno);
3341 pressure_class = ira_pressure_class_translate[pressure_class];
3342 *nregs
3343 = ira_reg_class_max_nregs[pressure_class][PSEUDO_REGNO_MODE (regno)];
3344 return pressure_class;
3345 }
3346 else if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno)
3347 && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
3348 {
3349 *nregs = 1;
3350 return ira_pressure_class_translate[REGNO_REG_CLASS (regno)];
3351 }
3352 else
3353 {
3354 *nregs = 0;
3355 return NO_REGS;
3356 }
3357 }
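
/* An illustration (a sketch, on a hypothetical 32-bit target): a pseudo
   whose allocno class translates to GENERAL_REGS and whose mode is
   DImode yields *NREGS == 2; a non-allocatable hard register yields
   NO_REGS with *NREGS == 0.  */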
3358
3359 /* Return pressure class and number of hard registers (through *NREGS)
3360 for destination of INSN. */
3361 static enum reg_class
3362 get_pressure_class_and_nregs (rtx_insn *insn, int *nregs)
3363 {
3364 rtx reg;
3365 enum reg_class pressure_class;
3366 const_rtx set = single_set_gcse (insn);
3367
3368 reg = SET_DEST (set);
3369 if (GET_CODE (reg) == SUBREG)
3370 reg = SUBREG_REG (reg);
3371 if (MEM_P (reg))
3372 {
3373 *nregs = 0;
3374 pressure_class = NO_REGS;
3375 }
3376 else
3377 {
3378 gcc_assert (REG_P (reg));
3379 pressure_class = reg_allocno_class (REGNO (reg));
3380 pressure_class = ira_pressure_class_translate[pressure_class];
3381 *nregs
3382 = ira_reg_class_max_nregs[pressure_class][GET_MODE (SET_SRC (set))];
3383 }
3384 return pressure_class;
3385 }

/* Increase (if INCR_P) or decrease current register pressure for
   register REGNO.  */
static void
change_pressure (int regno, bool incr_p)
{
  int nregs;
  enum reg_class pressure_class;

  pressure_class = get_regno_pressure_class (regno, &nregs);
  if (! incr_p)
    curr_reg_pressure[pressure_class] -= nregs;
  else
    {
      curr_reg_pressure[pressure_class] += nregs;
      if (BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
	  < curr_reg_pressure[pressure_class])
	BB_DATA (curr_bb)->max_reg_pressure[pressure_class]
	  = curr_reg_pressure[pressure_class];
    }
}

/* Calculate register pressure for each basic block by walking insns
   from last to first.  */
static void
calculate_bb_reg_pressure (void)
{
  int i;
  unsigned int j;
  rtx_insn *insn;
  basic_block bb;
  bitmap curr_regs_live;
  bitmap_iterator bi;

  ira_setup_eliminable_regset ();
  curr_regs_live = BITMAP_ALLOC (&reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    {
      curr_bb = bb;
      BB_DATA (bb)->live_in = BITMAP_ALLOC (NULL);
      BB_DATA (bb)->backup = BITMAP_ALLOC (NULL);
      bitmap_copy (BB_DATA (bb)->live_in, df_get_live_in (bb));
      bitmap_copy (curr_regs_live, df_get_live_out (bb));
      for (i = 0; i < ira_pressure_classes_num; i++)
	curr_reg_pressure[ira_pressure_classes[i]] = 0;
      EXECUTE_IF_SET_IN_BITMAP (curr_regs_live, 0, j, bi)
	change_pressure (j, true);

      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  rtx dreg;
	  int regno;
	  df_ref def, use;

	  if (! NONDEBUG_INSN_P (insn))
	    continue;

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      dreg = DF_REF_REAL_REG (def);
	      gcc_assert (REG_P (dreg));
	      regno = REGNO (dreg);
	      if (!(DF_REF_FLAGS (def)
		    & (DF_REF_PARTIAL | DF_REF_CONDITIONAL)))
		{
		  if (bitmap_clear_bit (curr_regs_live, regno))
		    change_pressure (regno, false);
		}
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    {
	      dreg = DF_REF_REAL_REG (use);
	      gcc_assert (REG_P (dreg));
	      regno = REGNO (dreg);
	      if (bitmap_set_bit (curr_regs_live, regno))
		change_pressure (regno, true);
	    }
	}
    }
  BITMAP_FREE (curr_regs_live);

  if (dump_file == NULL)
    return;

  fprintf (dump_file, "\nRegister Pressure: \n");
  FOR_EACH_BB_FN (bb, cfun)
    {
      fprintf (dump_file, " Basic block %d: \n", bb->index);
      for (i = 0; i < ira_pressure_classes_num; i++)
	{
	  enum reg_class pressure_class;

	  pressure_class = ira_pressure_classes[i];
	  if (BB_DATA (bb)->max_reg_pressure[pressure_class] == 0)
	    continue;

	  fprintf (dump_file, " %s=%d\n", reg_class_names[pressure_class],
		   BB_DATA (bb)->max_reg_pressure[pressure_class]);
	}
    }
  fprintf (dump_file, "\n");
}
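
/* A sketch of how the backward walk above behaves on a small block.  The
   registers, insns and pressure class are made up for illustration and
   all registers are assumed to share one class:

       live-out = {r63}                                     pressure = 1
       insn 3:  r63 <- r60 + r62   def r63 (-1), uses r60, r62 (+2) -> 2
       insn 2:  r62 <- mem[a]      def r62 (-1)                     -> 1
       insn 1:  r60 <- 5           def r60 (-1)                     -> 0

   Each full, unconditional def drops its register from the live set and
   each use adds it back, so max_reg_pressure ends up holding the
   high-water mark (2 here) for that pressure class.  */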

/* Top level routine to perform one code hoisting (aka unification) pass.

   Return nonzero if a change was made.  */

static int
one_code_hoisting_pass (void)
{
  int changed = 0;

  gcse_subst_count = 0;
  gcse_create_count = 0;

  /* Return if there's nothing to do, or it is too expensive.  */
  if (n_basic_blocks_for_fn (cfun) <= NUM_FIXED_BLOCKS + 1
      || gcse_or_cprop_is_too_expensive (_("GCSE disabled")))
    return 0;

  doing_code_hoisting_p = true;

  /* Calculate register pressure for each basic block.  */
  if (flag_ira_hoist_pressure)
    {
      regstat_init_n_sets_and_refs ();
      ira_set_pseudo_classes (false, dump_file);
      alloc_aux_for_blocks (sizeof (struct bb_data));
      calculate_bb_reg_pressure ();
      regstat_free_n_sets_and_refs ();
    }

  /* We need alias.  */
  init_alias_analysis ();

  bytes_used = 0;
  gcc_obstack_init (&gcse_obstack);
  alloc_gcse_mem ();

  alloc_hash_table (&expr_hash_table);
  compute_hash_table (&expr_hash_table);
  if (dump_file)
    dump_hash_table (dump_file, "Code Hoisting Expressions", &expr_hash_table);

  if (expr_hash_table.n_elems > 0)
    {
      alloc_code_hoist_mem (last_basic_block_for_fn (cfun),
			    expr_hash_table.n_elems);
      compute_code_hoist_data ();
      changed = hoist_code ();
      free_code_hoist_mem ();
    }

  if (flag_ira_hoist_pressure)
    {
      free_aux_for_blocks ();
      free_reg_info ();
    }
  free_hash_table (&expr_hash_table);
  free_gcse_mem ();
  obstack_free (&gcse_obstack, NULL);

  /* We are finished with alias.  */
  end_alias_analysis ();

  if (dump_file)
    {
      fprintf (dump_file, "HOIST of %s, %d basic blocks, %d bytes needed, ",
	       current_function_name (), n_basic_blocks_for_fn (cfun),
	       bytes_used);
      fprintf (dump_file, "%d substs, %d insns created\n",
	       gcse_subst_count, gcse_create_count);
    }

  doing_code_hoisting_p = false;

  return changed;
}

/* Here we provide the things required to do store motion towards the exit.
   In order for this to be effective, gcse also needed to be taught how to
   move a load when it is killed only by a store to itself.

	   int i;
	   float a[10];

	   void foo(float scale)
	   {
	     for (i=0; i<10; i++)
	       a[i] *= scale;
	   }

   'i' is both loaded and stored to in the loop.  Normally, gcse cannot move
   the load out since it's live around the loop, and stored at the bottom
   of the loop.

   The 'Load Motion' referred to and implemented in this file is
   an enhancement to gcse which, when using edge based LCM, recognizes
   this situation and allows gcse to move the load out of the loop.

   Once gcse has hoisted the load, store motion can then push this
   load towards the exit, and we end up with no loads or stores of 'i'
   in the loop.  */
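
/* After both transformations, the loop above ends up looking roughly
   like this (illustrative pseudo-C; the pass actually works on RTL, and
   'reg' stands for the new reaching register):

	   for (reg = 0; reg < 10; reg++)
	     a[reg] *= scale;
	   i = reg;

   i.e. the per-iteration load and store of 'i' are gone, and a single
   store of the register back into 'i' remains on the exit path.  */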

/* This will search the ldst list for a matching expression.  If it
   doesn't find one, we create one and initialize it.  */

static struct ls_expr *
ldst_entry (rtx x)
{
  int do_not_record_p = 0;
  struct ls_expr * ptr;
  unsigned int hash;
  ls_expr **slot;
  struct ls_expr e;

  hash = hash_rtx (x, GET_MODE (x), &do_not_record_p,
		   NULL, /*have_reg_qty=*/false);

  e.pattern = x;
  slot = pre_ldst_table->find_slot_with_hash (&e, hash, INSERT);
  if (*slot)
    return *slot;

  ptr = XNEW (struct ls_expr);

  ptr->next = pre_ldst_mems;
  ptr->expr = NULL;
  ptr->pattern = x;
  ptr->pattern_regs = NULL_RTX;
  ptr->stores.create (0);
  ptr->reaching_reg = NULL_RTX;
  ptr->invalid = 0;
  ptr->index = 0;
  ptr->hash_index = hash;
  pre_ldst_mems = ptr;
  *slot = ptr;

  return ptr;
}

/* Free up an individual ldst entry.  */

static void
free_ldst_entry (struct ls_expr * ptr)
{
  ptr->stores.release ();

  free (ptr);
}

/* Free up all memory associated with the ldst list.  */

static void
free_ld_motion_mems (void)
{
  delete pre_ldst_table;
  pre_ldst_table = NULL;

  while (pre_ldst_mems)
    {
      struct ls_expr * tmp = pre_ldst_mems;

      pre_ldst_mems = pre_ldst_mems->next;

      free_ldst_entry (tmp);
    }

  pre_ldst_mems = NULL;
}

/* Dump debugging info about the ldst list.  */

static void
print_ldst_list (FILE * file)
{
  struct ls_expr * ptr;

  fprintf (file, "LDST list: \n");

  for (ptr = pre_ldst_mems; ptr != NULL; ptr = ptr->next)
    {
      fprintf (file, " Pattern (%3d): ", ptr->index);

      print_rtl (file, ptr->pattern);

      fprintf (file, "\n Stores : ");
      print_rtx_insn_vec (file, ptr->stores);

      fprintf (file, "\n\n");
    }

  fprintf (file, "\n");
}

/* Return the entry for X if it is in the list of ldst only expressions,
   or NULL if there is no valid entry for it.  */

static struct ls_expr *
find_rtx_in_ldst (rtx x)
{
  struct ls_expr e;
  ls_expr **slot;
  if (!pre_ldst_table)
    return NULL;
  e.pattern = x;
  slot = pre_ldst_table->find_slot (&e, NO_INSERT);
  if (!slot || (*slot)->invalid)
    return NULL;
  return *slot;
}

/* Load Motion for loads which only kill themselves.  */

/* Return true if x, a MEM, is a simple access with no side effects.
   These are the types of loads we consider for the ld_motion list,
   otherwise we let the usual aliasing take care of it.  */

static int
simple_mem (const_rtx x)
{
  if (MEM_VOLATILE_P (x))
    return 0;

  if (GET_MODE (x) == BLKmode)
    return 0;

  /* If we are handling exceptions, we must be careful with memory references
     that may trap.  If we are not, the behavior is undefined, so we may just
     continue.  */
  if (cfun->can_throw_non_call_exceptions && may_trap_p (x))
    return 0;

  if (side_effects_p (x))
    return 0;

  /* Do not consider function arguments passed on stack.  */
  if (reg_mentioned_p (stack_pointer_rtx, x))
    return 0;

  if (flag_float_store && FLOAT_MODE_P (GET_MODE (x)))
    return 0;

  return 1;
}
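
/* For example (schematic RTL; the modes and addresses are illustrative
   only):

     (mem:SI (symbol_ref:SI ("i")))          simple -> considered
     (mem/v:SI (symbol_ref:SI ("port")))     volatile -> rejected
     (mem:BLK (reg:SI 60))                   BLKmode -> rejected
     (mem:SI (plus:SI (reg:SI sp) (const_int 8)))
					     mentions sp -> rejected  */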

/* Make sure there isn't a buried reference in this pattern anywhere.
   If there is, invalidate the entry for it since we're not capable
   of fixing it up just yet.  We have to be sure we know about ALL
   loads since the aliasing code will allow all entries in the
   ld_motion list to not-alias themselves.  If we miss a load, we will get
   the wrong value since gcse might common it and we won't know to
   fix it up.  */

static void
invalidate_any_buried_refs (rtx x)
{
  const char * fmt;
  int i, j;
  struct ls_expr * ptr;

  /* Invalidate it in the list.  */
  if (MEM_P (x) && simple_mem (x))
    {
      ptr = ldst_entry (x);
      ptr->invalid = 1;
    }

  /* Recursively process the insn.  */
  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	invalidate_any_buried_refs (XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  invalidate_any_buried_refs (XVECEXP (x, i, j));
    }
}
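
/* As an illustration (the pattern below is made up), if an insn's source
   is (plus:SI (mem:SI (symbol_ref "i")) (reg:SI 60)), the recursion above
   reaches the buried (mem:SI ...), enters it in the ldst list via
   ldst_entry, and marks the entry invalid: only bare MEM loads and stores
   can be fixed up by load motion, so a MEM nested inside another
   expression must not receive the special no-alias treatment.  */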

/* Find all the 'simple' MEMs which are used in LOADs and STORES.  Simple
   being defined as MEM loads and stores to symbols, with no side effects
   and no registers in the expression.  For a MEM destination, we also
   check that the insn is still valid if we replace the destination with a
   REG, as is done in update_ld_motion_stores.  If there are any uses/defs
   which don't match these criteria, they are invalidated and trimmed out
   later.  */

static void
compute_ld_motion_mems (void)
{
  struct ls_expr * ptr;
  basic_block bb;
  rtx_insn *insn;

  pre_ldst_mems = NULL;
  pre_ldst_table = new hash_table<pre_ldst_expr_hasher> (13);

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS (bb, insn)
	{
	  if (NONDEBUG_INSN_P (insn))
	    {
	      if (GET_CODE (PATTERN (insn)) == SET)
		{
		  rtx src = SET_SRC (PATTERN (insn));
		  rtx dest = SET_DEST (PATTERN (insn));

		  /* Check for a simple load.  */
		  if (MEM_P (src) && simple_mem (src))
		    {
		      ptr = ldst_entry (src);
		      if (!REG_P (dest))
			ptr->invalid = 1;
		    }
		  else
		    {
		      /* Make sure there isn't a buried load somewhere.  */
		      invalidate_any_buried_refs (src);
		    }

		  /* Check for a simple load through a REG_EQUAL note.  */
		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
		  if (note
		      && REG_NOTE_KIND (note) == REG_EQUAL
		      && (src_eq = XEXP (note, 0))
		      && !(MEM_P (src_eq) && simple_mem (src_eq)))
		    invalidate_any_buried_refs (src_eq);

		  /* Check for stores.  Don't worry about aliased ones, they
		     will block any movement we might do later.  We only care
		     about this exact pattern since those are the only
		     circumstances in which we will ignore the aliasing
		     info.  */
		  if (MEM_P (dest) && simple_mem (dest))
		    {
		      ptr = ldst_entry (dest);
		      machine_mode src_mode = GET_MODE (src);
		      if (! MEM_P (src)
			  && GET_CODE (src) != ASM_OPERANDS
			  /* Check for REG manually since want_to_gcse_p
			     returns 0 for all REGs.  */
			  && can_assign_to_reg_without_clobbers_p (src,
								   src_mode))
			ptr->stores.safe_push (insn);
		      else
			ptr->invalid = 1;
		    }
		}
	      else
		{
		  /* Invalidate all MEMs in the pattern and...  */
		  invalidate_any_buried_refs (PATTERN (insn));

		  /* ...in REG_EQUAL notes for PARALLELs with single SET.  */
		  rtx note = find_reg_equal_equiv_note (insn), src_eq;
		  if (note
		      && REG_NOTE_KIND (note) == REG_EQUAL
		      && (src_eq = XEXP (note, 0)))
		    invalidate_any_buried_refs (src_eq);
		}
	    }
	}
    }
}

/* Remove any references that have been either invalidated or are not in the
   expression list for pre gcse.  */

static void
trim_ld_motion_mems (void)
{
  struct ls_expr * * last = & pre_ldst_mems;
  struct ls_expr * ptr = pre_ldst_mems;

  while (ptr != NULL)
    {
      struct gcse_expr * expr;

      /* Delete if entry has been made invalid.  */
      if (! ptr->invalid)
	{
	  /* Delete if we cannot find this mem in the expression list.  */
	  unsigned int hash = ptr->hash_index % expr_hash_table.size;

	  for (expr = expr_hash_table.table[hash];
	       expr != NULL;
	       expr = expr->next_same_hash)
	    if (expr_equiv_p (expr->expr, ptr->pattern))
	      break;
	}
      else
	expr = (struct gcse_expr *) 0;

      if (expr)
	{
	  /* Set the expression field if we are keeping it.  */
	  ptr->expr = expr;
	  last = & ptr->next;
	  ptr = ptr->next;
	}
      else
	{
	  *last = ptr->next;
	  pre_ldst_table->remove_elt_with_hash (ptr, ptr->hash_index);
	  free_ldst_entry (ptr);
	  ptr = * last;
	}
    }

  /* Show the world what we've found.  */
  if (dump_file && pre_ldst_mems != NULL)
    print_ldst_list (dump_file);
}

/* This routine will take an expression which we are replacing with
   a reaching register, and update any stores that are needed if
   that expression is in the ld_motion list.  Stores are updated by
   copying their SRC to the reaching register, and then storing
   the reaching register into the store location.  This keeps the
   correct value in the reaching register for the loads.  */

static void
update_ld_motion_stores (struct gcse_expr * expr)
{
  struct ls_expr * mem_ptr;

  if ((mem_ptr = find_rtx_in_ldst (expr->expr)))
    {
      /* We can try to find just the REACHED stores, but it shouldn't
	 matter to set the reaching reg everywhere...  some might be
	 dead and should be eliminated later.  */

      /* We replace (set mem expr) with (set reg expr) (set mem reg)
	 where reg is the reaching reg used in the load.  We checked in
	 compute_ld_motion_mems that we can replace (set mem expr) with
	 (set reg expr) in that insn.  */
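      /* Concretely (a made-up example; the register numbers are
	 arbitrary), a recorded store such as

	   (set (mem:SI (symbol_ref:SI ("i"))) (reg:SI 60))

	 becomes the pair

	   (set (reg:SI 70) (reg:SI 60))
	   (set (mem:SI (symbol_ref:SI ("i"))) (reg:SI 70))

	 where (reg:SI 70) stands for EXPR->reaching_reg.  */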
      rtx_insn *insn;
      unsigned int i;
      FOR_EACH_VEC_ELT_REVERSE (mem_ptr->stores, i, insn)
	{
	  rtx pat = PATTERN (insn);
	  rtx src = SET_SRC (pat);
	  rtx reg = expr->reaching_reg;

	  /* If we've already copied it, continue.  */
	  if (expr->reaching_reg == src)
	    continue;

	  if (dump_file)
	    {
	      fprintf (dump_file, "PRE: store updated with reaching reg ");
	      print_rtl (dump_file, reg);
	      fprintf (dump_file, ":\n ");
	      print_inline_rtx (dump_file, insn, 8);
	      fprintf (dump_file, "\n");
	    }

	  rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
	  emit_insn_before (copy, insn);
	  SET_SRC (pat) = reg;
	  df_insn_rescan (insn);

	  /* Un-recognize this pattern since it's probably different now.  */
	  INSN_CODE (insn) = -1;
	  gcse_create_count++;
	}
    }
}

/* Return true if the graph is too expensive to optimize.  PASS is the
   optimization about to be performed.  */

bool
gcse_or_cprop_is_too_expensive (const char *pass)
{
  unsigned HOST_WIDE_INT memory_request
    = ((unsigned HOST_WIDE_INT)n_basic_blocks_for_fn (cfun)
       * SBITMAP_SET_SIZE (max_reg_num ()) * sizeof (SBITMAP_ELT_TYPE));

  /* Trying to perform global optimizations on flow graphs which have
     a high connectivity will take a long time and is unlikely to be
     particularly useful.

     In normal circumstances a cfg should have about twice as many
     edges as blocks.  But we do not want to punish small functions
     which have a couple of switch statements.  Rather than simply
     thresholding the number of blocks, we use something with more
     graceful degradation.  */
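  /* For example, a function with 1000 basic blocks passes the check below
     as long as it has at most 20000 + 1000 * 4 = 24000 edges; the allowed
     edges-per-block ratio therefore degrades smoothly toward 4 as
     functions grow larger.  */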
  if (n_edges_for_fn (cfun) > 20000 + n_basic_blocks_for_fn (cfun) * 4)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d edges/basic block",
	       pass, n_basic_blocks_for_fn (cfun),
	       n_edges_for_fn (cfun) / n_basic_blocks_for_fn (cfun));

      return true;
    }

  /* If allocating memory for the dataflow bitmaps would take up too much
     storage it's better just to disable the optimization.  */
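  /* Rough arithmetic, for illustration only: assuming 64-bit bitmap
     elements, 10000 blocks and 50000 registers would request about
     10000 * ceil (50000 / 64) * 8 bytes, i.e. roughly 60 MB, which is
     then compared in kibibytes against the --param max-gcse-memory
     limit.  */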
  if (memory_request / 1024 > (unsigned HOST_WIDE_INT)param_max_gcse_memory)
    {
      warning (OPT_Wdisabled_optimization,
	       "%s: %d basic blocks and %d registers; "
	       "increase %<--param max-gcse-memory%> above %wu",
	       pass, n_basic_blocks_for_fn (cfun), max_reg_num (),
	       memory_request / 1024);

      return true;
    }

  return false;
}

static unsigned int
execute_rtl_pre (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_pre_gcse_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}

static unsigned int
execute_rtl_hoist (void)
{
  int changed;
  delete_unreachable_blocks ();
  df_analyze ();
  changed = one_code_hoisting_pass ();
  flag_rerun_cse_after_global_opts |= changed;
  if (changed)
    cleanup_cfg (0);
  return 0;
}
namespace {

const pass_data pass_data_rtl_pre =
{
  RTL_PASS, /* type */
  "rtl pre", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_PRE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_pre : public rtl_opt_pass
{
public:
  pass_rtl_pre (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_pre, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_pre (); }

}; // class pass_rtl_pre

/* We do not construct an accurate cfg in functions which call
   setjmp, so none of these passes runs if the function calls
   setjmp.
   FIXME: Should just handle setjmp via REG_SETJMP notes.  */

bool
pass_rtl_pre::gate (function *fun)
{
  return optimize > 0 && flag_gcse
    && !fun->calls_setjmp
    && optimize_function_for_speed_p (fun)
    && dbg_cnt (pre);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_pre (gcc::context *ctxt)
{
  return new pass_rtl_pre (ctxt);
}

namespace {

const pass_data pass_data_rtl_hoist =
{
  RTL_PASS, /* type */
  "hoist", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_HOIST, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_rtl_hoist : public rtl_opt_pass
{
public:
  pass_rtl_hoist (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_rtl_hoist, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_rtl_hoist (); }

}; // class pass_rtl_hoist

bool
pass_rtl_hoist::gate (function *)
{
  return optimize > 0 && flag_gcse
    && !cfun->calls_setjmp
    /* It does not make sense to run code hoisting unless we are optimizing
       for code size -- it rarely makes programs faster, and can make them
       bigger if we did PRE (when optimizing for space, we don't run PRE).  */
    && optimize_function_for_size_p (cfun)
    && dbg_cnt (hoist);
}

} // anon namespace

rtl_opt_pass *
make_pass_rtl_hoist (gcc::context *ctxt)
{
  return new pass_rtl_hoist (ctxt);
}

/* Reset all state within gcse.cc so that we can rerun the compiler
   within the same process.  For use by toplev::finalize.  */

void
gcse_cc_finalize (void)
{
  test_insn = NULL;
}

#include "gt-gcse.h"