xref: /netbsd-src/external/gpl3/gcc/dist/gcc/sched-deps.cc (revision b1e838363e3c6fc78a55519254d99869742dd33c)
1 /* Instruction scheduling pass.  This file computes dependencies between
2    instructions.
3    Copyright (C) 1992-2022 Free Software Foundation, Inc.
4    Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5    and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 
7 This file is part of GCC.
8 
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13 
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
17 for more details.
18 
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3.  If not see
21 <http://www.gnu.org/licenses/>.  */
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "df.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "memmodel.h"
34 #include "ira.h"
35 #include "ira-int.h"
36 #include "insn-attr.h"
37 #include "cfgbuild.h"
38 #include "sched-int.h"
39 #include "cselib.h"
40 #include "function-abi.h"
41 
42 #ifdef INSN_SCHEDULING
43 
44 /* Holds current parameters for the dependency analyzer.  */
45 struct sched_deps_info_def *sched_deps_info;
46 
47 /* The data is specific to the Haifa scheduler.  */
48 vec<haifa_deps_insn_data_def>
49     h_d_i_d = vNULL;
50 
51 /* Return the major type present in the DS.  */
52 enum reg_note
53 ds_to_dk (ds_t ds)
54 {
55   if (ds & DEP_TRUE)
56     return REG_DEP_TRUE;
57 
58   if (ds & DEP_OUTPUT)
59     return REG_DEP_OUTPUT;
60 
61   if (ds & DEP_CONTROL)
62     return REG_DEP_CONTROL;
63 
64   gcc_assert (ds & DEP_ANTI);
65 
66   return REG_DEP_ANTI;
67 }
68 
69 /* Return equivalent dep_status.  */
70 ds_t
71 dk_to_ds (enum reg_note dk)
72 {
73   switch (dk)
74     {
75     case REG_DEP_TRUE:
76       return DEP_TRUE;
77 
78     case REG_DEP_OUTPUT:
79       return DEP_OUTPUT;
80 
81     case REG_DEP_CONTROL:
82       return DEP_CONTROL;
83 
84     default:
85       gcc_assert (dk == REG_DEP_ANTI);
86       return DEP_ANTI;
87     }
88 }
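/* A minimal illustration of the two conversions above (assuming a status
   word with several bits set): ds_to_dk tests DEP_TRUE first, then
   DEP_OUTPUT, DEP_CONTROL and finally DEP_ANTI, so the returned kind is
   the first one found in that fixed order, e.g.

     ds_to_dk (DEP_OUTPUT | DEP_ANTI) == REG_DEP_OUTPUT
     dk_to_ds (REG_DEP_OUTPUT) == DEP_OUTPUT

   and converting back with dk_to_ds keeps only that major bit.  */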
89 
90 /* Functions to operate with dependence information container - dep_t.  */
91 
92 /* Init DEP with the arguments.  */
93 void
94 init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
95 {
96   DEP_PRO (dep) = pro;
97   DEP_CON (dep) = con;
98   DEP_TYPE (dep) = type;
99   DEP_STATUS (dep) = ds;
100   DEP_COST (dep) = UNKNOWN_DEP_COST;
101   DEP_NONREG (dep) = 0;
102   DEP_MULTIPLE (dep) = 0;
103   DEP_REPLACE (dep) = NULL;
104   dep->unused = 0;
105 }
106 
107 /* Init DEP with the arguments.
108    While most of the scheduler (including targets) only needs the major type
109    of the dependency, it is convenient to hide full dep_status from them.  */
110 void
111 init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
112 {
113   ds_t ds;
114 
115   if ((current_sched_info->flags & USE_DEPS_LIST))
116     ds = dk_to_ds (kind);
117   else
118     ds = 0;
119 
120   init_dep_1 (dep, pro, con, kind, ds);
121 }
122 
123 /* Make a copy of FROM in TO.  */
124 static void
125 copy_dep (dep_t to, dep_t from)
126 {
127   memcpy (to, from, sizeof (*to));
128 }
129 
130 static void dump_ds (FILE *, ds_t);
131 
132 /* Define flags for dump_dep ().  */
133 
134 /* Dump producer of the dependence.  */
135 #define DUMP_DEP_PRO (2)
136 
137 /* Dump consumer of the dependence.  */
138 #define DUMP_DEP_CON (4)
139 
140 /* Dump type of the dependence.  */
141 #define DUMP_DEP_TYPE (8)
142 
143 /* Dump status of the dependence.  */
144 #define DUMP_DEP_STATUS (16)
145 
146 /* Dump all information about the dependence.  */
147 #define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
148 		      |DUMP_DEP_STATUS)
149 
150 /* Dump DEP to DUMP.
151    FLAGS is a bit mask specifying what information about DEP needs
152    to be printed.
153    If FLAGS has the very first bit set, then dump all information about DEP
154    and propagate this bit into the callee dump functions.  */
155 static void
156 dump_dep (FILE *dump, dep_t dep, int flags)
157 {
158   if (flags & 1)
159     flags |= DUMP_DEP_ALL;
160 
161   fprintf (dump, "<");
162 
163   if (flags & DUMP_DEP_PRO)
164     fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
165 
166   if (flags & DUMP_DEP_CON)
167     fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
168 
169   if (flags & DUMP_DEP_TYPE)
170     {
171       char t;
172       enum reg_note type = DEP_TYPE (dep);
173 
174       switch (type)
175 	{
176 	case REG_DEP_TRUE:
177 	  t = 't';
178 	  break;
179 
180 	case REG_DEP_OUTPUT:
181 	  t = 'o';
182 	  break;
183 
184 	case REG_DEP_CONTROL:
185 	  t = 'c';
186 	  break;
187 
188 	case REG_DEP_ANTI:
189 	  t = 'a';
190 	  break;
191 
192 	default:
193 	  gcc_unreachable ();
194 	  break;
195 	}
196 
197       fprintf (dump, "%c; ", t);
198     }
199 
200   if (flags & DUMP_DEP_STATUS)
201     {
202       if (current_sched_info->flags & USE_DEPS_LIST)
203 	dump_ds (dump, DEP_STATUS (dep));
204     }
205 
206   fprintf (dump, ">");
207 }
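/* A minimal usage sketch of the flag bits above (DEP assumed to be a
   valid dep_t):

     dump_dep (stderr, dep, DUMP_DEP_PRO | DUMP_DEP_CON);

   prints "<uid1; uid2; >", i.e. only the producer and consumer insn UIDs,
   while passing 1 (the "dump everything" bit) prints all fields.  */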
208 
209 /* Default flags for dump_dep ().  */
210 static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
211 
212 /* Dump all fields of DEP to STDERR.  */
213 void
214 sd_debug_dep (dep_t dep)
215 {
216   dump_dep (stderr, dep, 1);
217   fprintf (stderr, "\n");
218 }
219 
220 /* Determine whether DEP is a dependency link of a non-debug insn on a
221    debug insn.  */
222 
223 static inline bool
224 depl_on_debug_p (dep_link_t dep)
225 {
226   return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
227 	  && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
228 }
229 
230 /* Functions to operate with a single link from the dependencies lists -
231    dep_link_t.  */
232 
233 /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
234    PREV_NEXT_P.  */
235 static void
236 attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
237 {
238   dep_link_t next = *prev_nextp;
239 
240   gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
241 	      && DEP_LINK_NEXT (l) == NULL);
242 
243   /* Init node being inserted.  */
244   DEP_LINK_PREV_NEXTP (l) = prev_nextp;
245   DEP_LINK_NEXT (l) = next;
246 
247   /* Fix next node.  */
248   if (next != NULL)
249     {
250       gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
251 
252       DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
253     }
254 
255   /* Fix prev node.  */
256   *prev_nextp = l;
257 }
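/* For instance, inserting at the head of a deps_list (as add_to_deps_list
   below does) passes the address of the list's first-link field:

     attach_dep_link (link, &DEPS_LIST_FIRST (list));

   afterwards DEPS_LIST_FIRST (list) == link, and the old head, if any,
   remains reachable through DEP_LINK_NEXT (link).  */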
258 
259 /* Add dep_link LINK to deps_list L.  */
260 static void
261 add_to_deps_list (dep_link_t link, deps_list_t l)
262 {
263   attach_dep_link (link, &DEPS_LIST_FIRST (l));
264 
265   /* Don't count debug deps.  */
266   if (!depl_on_debug_p (link))
267     ++DEPS_LIST_N_LINKS (l);
268 }
269 
270 /* Detach dep_link L from the list.  */
271 static void
272 detach_dep_link (dep_link_t l)
273 {
274   dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
275   dep_link_t next = DEP_LINK_NEXT (l);
276 
277   *prev_nextp = next;
278 
279   if (next != NULL)
280     DEP_LINK_PREV_NEXTP (next) = prev_nextp;
281 
282   DEP_LINK_PREV_NEXTP (l) = NULL;
283   DEP_LINK_NEXT (l) = NULL;
284 }
285 
286 /* Remove link LINK from list LIST.  */
287 static void
288 remove_from_deps_list (dep_link_t link, deps_list_t list)
289 {
290   detach_dep_link (link);
291 
292   /* Don't count debug deps.  */
293   if (!depl_on_debug_p (link))
294     --DEPS_LIST_N_LINKS (list);
295 }
296 
297 /* Move link LINK from list FROM to list TO.  */
298 static void
299 move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
300 {
301   remove_from_deps_list (link, from);
302   add_to_deps_list (link, to);
303 }
304 
305 /* Return true if LINK is not attached to any list.  */
306 static bool
307 dep_link_is_detached_p (dep_link_t link)
308 {
309   return DEP_LINK_PREV_NEXTP (link) == NULL;
310 }
311 
312 /* Pool to hold all dependency nodes (dep_node_t).  */
313 static object_allocator<_dep_node> *dn_pool;
314 
315 /* Number of dep_nodes out there.  */
316 static int dn_pool_diff = 0;
317 
318 /* Create a dep_node.  */
319 static dep_node_t
320 create_dep_node (void)
321 {
322   dep_node_t n = dn_pool->allocate ();
323   dep_link_t back = DEP_NODE_BACK (n);
324   dep_link_t forw = DEP_NODE_FORW (n);
325 
326   DEP_LINK_NODE (back) = n;
327   DEP_LINK_NEXT (back) = NULL;
328   DEP_LINK_PREV_NEXTP (back) = NULL;
329 
330   DEP_LINK_NODE (forw) = n;
331   DEP_LINK_NEXT (forw) = NULL;
332   DEP_LINK_PREV_NEXTP (forw) = NULL;
333 
334   ++dn_pool_diff;
335 
336   return n;
337 }
338 
339 /* Delete dep_node N.  N must not be connected to any deps_list.  */
340 static void
341 delete_dep_node (dep_node_t n)
342 {
343   gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
344 	      && dep_link_is_detached_p (DEP_NODE_FORW (n)));
345 
346   XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
347 
348   --dn_pool_diff;
349 
350   dn_pool->remove (n);
351 }
352 
353 /* Pool to hold dependencies lists (deps_list_t).  */
354 static object_allocator<_deps_list> *dl_pool;
355 
356 /* Number of deps_lists out there.  */
357 static int dl_pool_diff = 0;
358 
359 /* Functions to operate with dependences lists - deps_list_t.  */
360 
361 /* Return true if list L is empty.  */
362 static bool
363 deps_list_empty_p (deps_list_t l)
364 {
365   return DEPS_LIST_N_LINKS (l) == 0;
366 }
367 
368 /* Create a new deps_list.  */
369 static deps_list_t
370 create_deps_list (void)
371 {
372   deps_list_t l = dl_pool->allocate ();
373 
374   DEPS_LIST_FIRST (l) = NULL;
375   DEPS_LIST_N_LINKS (l) = 0;
376 
377   ++dl_pool_diff;
378   return l;
379 }
380 
381 /* Free deps_list L.  */
382 static void
383 free_deps_list (deps_list_t l)
384 {
385   gcc_assert (deps_list_empty_p (l));
386 
387   --dl_pool_diff;
388 
389   dl_pool->remove (l);
390 }
391 
392 /* Return true if there are no dep_nodes or deps_lists out there.
393    After the region is scheduled all the dependency nodes and lists
394    should [generally] be returned to the pools.  */
395 bool
396 deps_pools_are_empty_p (void)
397 {
398   return dn_pool_diff == 0 && dl_pool_diff == 0;
399 }
400 
401 /* Remove all elements from L.  */
402 static void
403 clear_deps_list (deps_list_t l)
404 {
405   do
406     {
407       dep_link_t link = DEPS_LIST_FIRST (l);
408 
409       if (link == NULL)
410 	break;
411 
412       remove_from_deps_list (link, l);
413     }
414   while (1);
415 }
416 
417 /* Decide whether a dependency should be treated as a hard or a speculative
418    dependency.  */
419 static bool
420 dep_spec_p (dep_t dep)
421 {
422   if (current_sched_info->flags & DO_SPECULATION)
423     {
424       if (DEP_STATUS (dep) & SPECULATIVE)
425 	return true;
426     }
427   if (current_sched_info->flags & DO_PREDICATION)
428     {
429       if (DEP_TYPE (dep) == REG_DEP_CONTROL)
430 	return true;
431     }
432   if (DEP_REPLACE (dep) != NULL)
433     return true;
434   return false;
435 }
436 
437 static regset reg_pending_sets;
438 static regset reg_pending_clobbers;
439 static regset reg_pending_uses;
440 static regset reg_pending_control_uses;
441 static enum reg_pending_barrier_mode reg_pending_barrier;
442 
443 /* Hard registers implicitly clobbered or used (or may be implicitly
444    clobbered or used) by the currently analyzed insn.  For example,
445    an insn may have a constraint that allows only a single-register
446    class.  Even if there is currently no hard register in the insn,
447    that particular hard register will appear in the insn after the
448    reload pass because the constraint requires it.  */
449 static HARD_REG_SET implicit_reg_pending_clobbers;
450 static HARD_REG_SET implicit_reg_pending_uses;
451 
452 /* To speed up the test for duplicate dependency links we keep a
453    record of dependencies created by add_dependence when the average
454    number of instructions in a basic block is very large.
455 
456    Studies have shown that there is typically around 5 instructions between
457    branches for typical C code.  So we can make a guess that the average
458    basic block is approximately 5 instructions long; we will choose 100X
459    the average size as a very large basic block.
460 
461    Each insn has associated bitmaps for its dependencies.  Each bitmap
462    has enough entries to represent a dependency on any other insn in
463    the insn chain.  If the bitmap for the true-dependency cache is
464    allocated, then the remaining caches are allocated as well.  */
465 static bitmap true_dependency_cache = NULL;
466 static bitmap output_dependency_cache = NULL;
467 static bitmap anti_dependency_cache = NULL;
468 static bitmap control_dependency_cache = NULL;
469 static bitmap spec_dependency_cache = NULL;
470 static int cache_size;
471 
472 /* True if we should mark added dependencies as non-register deps.  */
473 static bool mark_as_hard;
474 
475 static int deps_may_trap_p (const_rtx);
476 static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
477 static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
478 				 enum reg_note, bool);
479 static void add_dependence_list_and_free (class deps_desc *, rtx_insn *,
480 					  rtx_insn_list **, int, enum reg_note,
481 					  bool);
482 static void delete_all_dependences (rtx_insn *);
483 static void chain_to_prev_insn (rtx_insn *);
484 
485 static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int);
486 static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *);
487 static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *);
488 static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *);
489 
490 static bool sched_has_condition_p (const rtx_insn *);
491 static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
492 
493 static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
494 							  rtx, rtx);
495 static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
496 
497 static void check_dep (dep_t, bool);
498 
499 
500 /* Return nonzero if a load of the memory reference MEM can cause a trap.  */
501 
502 static int
503 deps_may_trap_p (const_rtx mem)
504 {
505   const_rtx addr = XEXP (mem, 0);
506 
507   if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
508     {
509       const_rtx t = get_reg_known_value (REGNO (addr));
510       if (t)
511 	addr = t;
512     }
513   return rtx_addr_can_trap_p (addr);
514 }
515 
516 
517 /* Find the condition under which INSN is executed.  If REV is not NULL,
518    it is set to TRUE when the returned comparison should be reversed
519    to get the actual condition.  */
520 static rtx
521 sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
522 {
523   rtx pat = PATTERN (insn);
524   rtx src;
525 
526   if (rev)
527     *rev = false;
528 
529   if (GET_CODE (pat) == COND_EXEC)
530     return COND_EXEC_TEST (pat);
531 
532   if (!any_condjump_p (insn) || !onlyjump_p (insn))
533     return 0;
534 
535   src = SET_SRC (pc_set (insn));
536 
537   if (XEXP (src, 2) == pc_rtx)
538     return XEXP (src, 0);
539   else if (XEXP (src, 1) == pc_rtx)
540     {
541       rtx cond = XEXP (src, 0);
542       enum rtx_code revcode = reversed_comparison_code (cond, insn);
543 
544       if (revcode == UNKNOWN)
545 	return 0;
546 
547       if (rev)
548 	*rev = true;
549       return cond;
550     }
551 
552   return 0;
553 }
554 
555 /* Return the condition under which INSN does not execute (i.e.  the
556    not-taken condition for a conditional branch), or NULL if we cannot
557    find such a condition.  The caller should make a copy of the condition
558    before using it.  */
559 rtx
560 sched_get_reverse_condition_uncached (const rtx_insn *insn)
561 {
562   bool rev;
563   rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
564   if (cond == NULL_RTX)
565     return cond;
566   if (!rev)
567     {
568       enum rtx_code revcode = reversed_comparison_code (cond, insn);
569       cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
570 			     XEXP (cond, 0),
571 			     XEXP (cond, 1));
572     }
573   return cond;
574 }
575 
576 /* Caching variant of sched_get_condition_with_rev_uncached.
577    We only do actual work the first time we come here for an insn; the
578    results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
579 static rtx
580 sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
581 {
582   bool tmp;
583 
584   if (INSN_LUID (insn) == 0)
585     return sched_get_condition_with_rev_uncached (insn, rev);
586 
587   if (INSN_CACHED_COND (insn) == const_true_rtx)
588     return NULL_RTX;
589 
590   if (INSN_CACHED_COND (insn) != NULL_RTX)
591     {
592       if (rev)
593 	*rev = INSN_REVERSE_COND (insn);
594       return INSN_CACHED_COND (insn);
595     }
596 
597   INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
598   INSN_REVERSE_COND (insn) = tmp;
599 
600   if (INSN_CACHED_COND (insn) == NULL_RTX)
601     {
602       INSN_CACHED_COND (insn) = const_true_rtx;
603       return NULL_RTX;
604     }
605 
606   if (rev)
607     *rev = INSN_REVERSE_COND (insn);
608   return INSN_CACHED_COND (insn);
609 }
610 
611 /* True when we can find a condition under which INSN is executed.  */
612 static bool
613 sched_has_condition_p (const rtx_insn *insn)
614 {
615   return !! sched_get_condition_with_rev (insn, NULL);
616 }
617 
618 
619 
620 /* Return nonzero if conditions COND1 and COND2 can never be both true.  */
621 static int
622 conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
623 {
624   if (COMPARISON_P (cond1)
625       && COMPARISON_P (cond2)
626       && GET_CODE (cond1) ==
627 	  (rev1==rev2
628 	  ? reversed_comparison_code (cond2, NULL)
629 	  : GET_CODE (cond2))
630       && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
631       && XEXP (cond1, 1) == XEXP (cond2, 1))
632     return 1;
633   return 0;
634 }
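/* A worked example (hypothetical RTL): with

     cond1 = (eq (reg 100) (const_int 0))
     cond2 = (ne (reg 100) (const_int 0))

   and rev1 == rev2, reversed_comparison_code (cond2, NULL) is EQ, the
   operands match, so the two conditions are mutually exclusive and the
   function returns 1.  */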
635 
636 /* Return true if insn1 and insn2 can never depend on one another because
637    the conditions under which they are executed are mutually exclusive.  */
638 bool
639 sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
640 {
641   rtx cond1, cond2;
642   bool rev1 = false, rev2 = false;
643 
644   /* df doesn't handle conditional lifetimes entirely correctly;
645      calls mess up the conditional lifetimes.  */
646   if (!CALL_P (insn1) && !CALL_P (insn2))
647     {
648       cond1 = sched_get_condition_with_rev (insn1, &rev1);
649       cond2 = sched_get_condition_with_rev (insn2, &rev2);
650       if (cond1 && cond2
651 	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
652 	  /* Make sure first instruction doesn't affect condition of second
653 	     instruction if switched.  */
654 	  && !modified_in_p (cond1, insn2)
655 	  /* Make sure second instruction doesn't affect condition of first
656 	     instruction if switched.  */
657 	  && !modified_in_p (cond2, insn1))
658 	return true;
659     }
660   return false;
661 }
662 
663 
664 /* Return true if INSN can potentially be speculated with type DS.  */
665 bool
666 sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
667 {
668   if (HAS_INTERNAL_DEP (insn))
669     return false;
670 
671   if (!NONJUMP_INSN_P (insn))
672     return false;
673 
674   if (SCHED_GROUP_P (insn))
675     return false;
676 
677   if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
678     return false;
679 
680   if (side_effects_p (PATTERN (insn)))
681     return false;
682 
683   if (ds & BE_IN_SPEC)
684     /* The following instructions, which depend on a speculatively scheduled
685        instruction, cannot be speculatively scheduled along.  */
686     {
687       if (may_trap_or_fault_p (PATTERN (insn)))
688 	/* If instruction might fault, it cannot be speculatively scheduled.
689 	   For control speculation it's obvious why and for data speculation
690 	   it's because the insn might get wrong input if speculation
691 	   wasn't successful.  */
692 	return false;
693 
694       if ((ds & BE_IN_DATA)
695 	  && sched_has_condition_p (insn))
696 	/* If this is a predicated instruction, then it cannot be
697 	   speculatively scheduled.  See PR35659.  */
698 	return false;
699     }
700 
701   return true;
702 }
703 
704 /* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
705    initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
706    and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
707    This function is used to switch sd_iterator to the next list.
708    !!! For internal use only.  Might consider moving it to sched-int.h.  */
709 void
710 sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
711 	      deps_list_t *list_ptr, bool *resolved_p_ptr)
712 {
713   sd_list_types_def types = *types_ptr;
714 
715   if (types & SD_LIST_HARD_BACK)
716     {
717       *list_ptr = INSN_HARD_BACK_DEPS (insn);
718       *resolved_p_ptr = false;
719       *types_ptr = types & ~SD_LIST_HARD_BACK;
720     }
721   else if (types & SD_LIST_SPEC_BACK)
722     {
723       *list_ptr = INSN_SPEC_BACK_DEPS (insn);
724       *resolved_p_ptr = false;
725       *types_ptr = types & ~SD_LIST_SPEC_BACK;
726     }
727   else if (types & SD_LIST_FORW)
728     {
729       *list_ptr = INSN_FORW_DEPS (insn);
730       *resolved_p_ptr = false;
731       *types_ptr = types & ~SD_LIST_FORW;
732     }
733   else if (types & SD_LIST_RES_BACK)
734     {
735       *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
736       *resolved_p_ptr = true;
737       *types_ptr = types & ~SD_LIST_RES_BACK;
738     }
739   else if (types & SD_LIST_RES_FORW)
740     {
741       *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
742       *resolved_p_ptr = true;
743       *types_ptr = types & ~SD_LIST_RES_FORW;
744     }
745   else
746     {
747       *list_ptr = NULL;
748       *resolved_p_ptr = false;
749       *types_ptr = SD_LIST_NONE;
750     }
751 }
752 
753 /* Return the summary size of INSN's lists defined by LIST_TYPES.  */
754 int
755 sd_lists_size (const_rtx insn, sd_list_types_def list_types)
756 {
757   int size = 0;
758 
759   while (list_types != SD_LIST_NONE)
760     {
761       deps_list_t list;
762       bool resolved_p;
763 
764       sd_next_list (insn, &list_types, &list, &resolved_p);
765       if (list)
766 	size += DEPS_LIST_N_LINKS (list);
767     }
768 
769   return size;
770 }
771 
772 /* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
773 
774 bool
775 sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
776 {
777   while (list_types != SD_LIST_NONE)
778     {
779       deps_list_t list;
780       bool resolved_p;
781 
782       sd_next_list (insn, &list_types, &list, &resolved_p);
783       if (!deps_list_empty_p (list))
784 	return false;
785     }
786 
787   return true;
788 }
789 
790 /* Initialize data for INSN.  */
791 void
792 sd_init_insn (rtx_insn *insn)
793 {
794   INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
795   INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
796   INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
797   INSN_FORW_DEPS (insn) = create_deps_list ();
798   INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
799 
800   /* ??? It would be nice to allocate dependency caches here.  */
801 }
802 
803 /* Free data for INSN.  */
804 void
805 sd_finish_insn (rtx_insn *insn)
806 {
807   /* ??? It would be nice to deallocate dependency caches here.  */
808 
809   free_deps_list (INSN_HARD_BACK_DEPS (insn));
810   INSN_HARD_BACK_DEPS (insn) = NULL;
811 
812   free_deps_list (INSN_SPEC_BACK_DEPS (insn));
813   INSN_SPEC_BACK_DEPS (insn) = NULL;
814 
815   free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
816   INSN_RESOLVED_BACK_DEPS (insn) = NULL;
817 
818   free_deps_list (INSN_FORW_DEPS (insn));
819   INSN_FORW_DEPS (insn) = NULL;
820 
821   free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
822   INSN_RESOLVED_FORW_DEPS (insn) = NULL;
823 }
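/* A minimal iteration sketch over the lists created above (INSN assumed
   to be an insn that has already been analyzed):

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       fprintf (stderr, "insn %d depends on insn %d\n",
                INSN_UID (DEP_CON (dep)), INSN_UID (DEP_PRO (dep)));

   SD_LIST_BACK covers both the hard and the speculative backward lists.  */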
824 
825 /* Find a dependency between producer PRO and consumer CON.
826    Search through resolved dependency lists if RESOLVED_P is true.
827    If no such dependency is found return NULL,
828    otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
829    with an iterator pointing to it.  */
830 static dep_t
831 sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
832 			      sd_iterator_def *sd_it_ptr)
833 {
834   sd_list_types_def pro_list_type;
835   sd_list_types_def con_list_type;
836   sd_iterator_def sd_it;
837   dep_t dep;
838   bool found_p = false;
839 
840   if (resolved_p)
841     {
842       pro_list_type = SD_LIST_RES_FORW;
843       con_list_type = SD_LIST_RES_BACK;
844     }
845   else
846     {
847       pro_list_type = SD_LIST_FORW;
848       con_list_type = SD_LIST_BACK;
849     }
850 
851   /* Walk through either back list of INSN or forw list of ELEM
852      depending on which one is shorter.  */
853   if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
854     {
855       /* Find the dep_link with producer PRO in consumer's back_deps.  */
856       FOR_EACH_DEP (con, con_list_type, sd_it, dep)
857 	if (DEP_PRO (dep) == pro)
858 	  {
859 	    found_p = true;
860 	    break;
861 	  }
862     }
863   else
864     {
865       /* Find the dep_link with consumer CON in producer's forw_deps.  */
866       FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
867 	if (DEP_CON (dep) == con)
868 	  {
869 	    found_p = true;
870 	    break;
871 	  }
872     }
873 
874   if (found_p)
875     {
876       if (sd_it_ptr != NULL)
877 	*sd_it_ptr = sd_it;
878 
879       return dep;
880     }
881 
882   return NULL;
883 }
884 
885 /* Find a dependency between producer PRO and consumer CON.
886    Use the dependency caches [if available] to check whether the
887    dependency is present at all.  Search through resolved dependency
888    lists if RESOLVED_P is true.  Return the dep, or NULL if none is found.  */
889 dep_t
890 sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
891 {
892   if (true_dependency_cache != NULL)
893     /* Avoiding the list walk below can cut compile times dramatically
894        for some code.  */
895     {
896       int elem_luid = INSN_LUID (pro);
897       int insn_luid = INSN_LUID (con);
898 
899       if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
900 	  && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
901 	  && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
902 	  && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
903 	return NULL;
904     }
905 
906   return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
907 }
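/* A minimal usage sketch (PRO and CON assumed to be analyzed insns):

     if (sd_find_dep_between (pro, con, false) != NULL)
       ... a dependence of CON on PRO already exists ...  */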
908 
909 /* Add or update  a dependence described by DEP.
910    MEM1 and MEM2, if non-null, correspond to memory locations in case of
911    data speculation.
912 
913    The function returns a value indicating if an old entry has been changed
914    or a new entry has been added to insn's backward deps.
915 
916    This function merely checks whether producer and consumer are the same insn
917    and doesn't create a dep in this case.  Actual manipulation of
918    dependence data structures is performed in add_or_update_dep_1.  */
919 static enum DEPS_ADJUST_RESULT
920 maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
921 {
922   rtx_insn *elem = DEP_PRO (dep);
923   rtx_insn *insn = DEP_CON (dep);
924 
925   gcc_assert (INSN_P (insn) && INSN_P (elem));
926 
927   /* Don't depend an insn on itself.  */
928   if (insn == elem)
929     {
930       if (sched_deps_info->generate_spec_deps)
931         /* INSN has an internal dependence, which we can't overcome.  */
932         HAS_INTERNAL_DEP (insn) = 1;
933 
934       return DEP_NODEP;
935     }
936 
937   return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
938 }
939 
940 /* Ask dependency caches what needs to be done for dependence DEP.
941    Return DEP_CREATED if new dependence should be created and there is no
942    need to try to find one searching the dependencies lists.
943    Return DEP_PRESENT if there already is a dependence described by DEP and
944    hence nothing is to be done.
945    Return DEP_CHANGED if there already is a dependence, but it should be
946    updated to incorporate additional information from DEP.  */
947 static enum DEPS_ADJUST_RESULT
948 ask_dependency_caches (dep_t dep)
949 {
950   int elem_luid = INSN_LUID (DEP_PRO (dep));
951   int insn_luid = INSN_LUID (DEP_CON (dep));
952 
953   gcc_assert (true_dependency_cache != NULL
954 	      && output_dependency_cache != NULL
955 	      && anti_dependency_cache != NULL
956 	      && control_dependency_cache != NULL);
957 
958   if (!(current_sched_info->flags & USE_DEPS_LIST))
959     {
960       enum reg_note present_dep_type;
961 
962       if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
963 	present_dep_type = REG_DEP_TRUE;
964       else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
965 	present_dep_type = REG_DEP_OUTPUT;
966       else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
967 	present_dep_type = REG_DEP_ANTI;
968       else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
969 	present_dep_type = REG_DEP_CONTROL;
970       else
971 	/* There is no existing dep so it should be created.  */
972 	return DEP_CREATED;
973 
974       if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
975 	/* DEP does not add anything to the existing dependence.  */
976 	return DEP_PRESENT;
977     }
978   else
979     {
980       ds_t present_dep_types = 0;
981 
982       if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
983 	present_dep_types |= DEP_TRUE;
984       if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
985 	present_dep_types |= DEP_OUTPUT;
986       if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
987 	present_dep_types |= DEP_ANTI;
988       if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
989 	present_dep_types |= DEP_CONTROL;
990 
991       if (present_dep_types == 0)
992 	/* There is no existing dep so it should be created.  */
993 	return DEP_CREATED;
994 
995       if (!(current_sched_info->flags & DO_SPECULATION)
996 	  || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
997 	{
998 	  if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
999 	      == present_dep_types)
1000 	    /* DEP does not add anything to the existing dependence.  */
1001 	    return DEP_PRESENT;
1002 	}
1003       else
1004 	{
1005 	  /* Only true dependencies can be data speculative and
1006 	     only anti dependencies can be control speculative.  */
1007 	  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1008 		      == present_dep_types);
1009 
1010 	  /* if (DEP is SPECULATIVE) then
1011 	     ..we should update DEP_STATUS
1012 	     else
1013 	     ..we should reset existing dep to non-speculative.  */
1014 	}
1015     }
1016 
1017   return DEP_CHANGED;
1018 }
1019 
1020 /* Set dependency caches according to DEP.  */
1021 static void
1022 set_dependency_caches (dep_t dep)
1023 {
1024   int elem_luid = INSN_LUID (DEP_PRO (dep));
1025   int insn_luid = INSN_LUID (DEP_CON (dep));
1026 
1027   if (!(current_sched_info->flags & USE_DEPS_LIST))
1028     {
1029       switch (DEP_TYPE (dep))
1030 	{
1031 	case REG_DEP_TRUE:
1032 	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1033 	  break;
1034 
1035 	case REG_DEP_OUTPUT:
1036 	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1037 	  break;
1038 
1039 	case REG_DEP_ANTI:
1040 	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1041 	  break;
1042 
1043 	case REG_DEP_CONTROL:
1044 	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1045 	  break;
1046 
1047 	default:
1048 	  gcc_unreachable ();
1049 	}
1050     }
1051   else
1052     {
1053       ds_t ds = DEP_STATUS (dep);
1054 
1055       if (ds & DEP_TRUE)
1056 	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
1057       if (ds & DEP_OUTPUT)
1058 	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
1059       if (ds & DEP_ANTI)
1060 	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
1061       if (ds & DEP_CONTROL)
1062 	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
1063 
1064       if (ds & SPECULATIVE)
1065 	{
1066 	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
1067 	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
1068 	}
1069     }
1070 }
1071 
1072 /* The type of dependence DEP has changed from OLD_TYPE.  Update the
1073    dependency caches accordingly.  */
1074 static void
1075 update_dependency_caches (dep_t dep, enum reg_note old_type)
1076 {
1077   int elem_luid = INSN_LUID (DEP_PRO (dep));
1078   int insn_luid = INSN_LUID (DEP_CON (dep));
1079 
1080   /* Clear corresponding cache entry because type of the link
1081      may have changed.  Keep them if we use_deps_list.  */
1082   if (!(current_sched_info->flags & USE_DEPS_LIST))
1083     {
1084       switch (old_type)
1085 	{
1086 	case REG_DEP_OUTPUT:
1087 	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1088 	  break;
1089 
1090 	case REG_DEP_ANTI:
1091 	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1092 	  break;
1093 
1094 	case REG_DEP_CONTROL:
1095 	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1096 	  break;
1097 
1098 	default:
1099 	  gcc_unreachable ();
1100 	}
1101     }
1102 
1103   set_dependency_caches (dep);
1104 }
1105 
1106 /* Convert a dependence pointed to by SD_IT to be non-speculative.  */
1107 static void
1108 change_spec_dep_to_hard (sd_iterator_def sd_it)
1109 {
1110   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1111   dep_link_t link = DEP_NODE_BACK (node);
1112   dep_t dep = DEP_NODE_DEP (node);
1113   rtx_insn *elem = DEP_PRO (dep);
1114   rtx_insn *insn = DEP_CON (dep);
1115 
1116   move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1117 
1118   DEP_STATUS (dep) &= ~SPECULATIVE;
1119 
1120   if (true_dependency_cache != NULL)
1121     /* Clear the cache entry.  */
1122     bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1123 		      INSN_LUID (elem));
1124 }
1125 
1126 /* Update DEP to incorporate information from NEW_DEP.
1127    SD_IT points to DEP in case it should be moved to another list.
1128    MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1129    data-speculative dependence should be updated.  */
1130 static enum DEPS_ADJUST_RESULT
1131 update_dep (dep_t dep, dep_t new_dep,
1132 	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1133 	    rtx mem1 ATTRIBUTE_UNUSED,
1134 	    rtx mem2 ATTRIBUTE_UNUSED)
1135 {
1136   enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1137   enum reg_note old_type = DEP_TYPE (dep);
1138   bool was_spec = dep_spec_p (dep);
1139 
1140   DEP_NONREG (dep) |= DEP_NONREG (new_dep);
1141   DEP_MULTIPLE (dep) = 1;
1142 
1143   /* If this is a more restrictive type of dependence than the
1144      existing one, then change the existing dependence to this
1145      type.  */
1146   if ((int) DEP_TYPE (new_dep) < (int) old_type)
1147     {
1148       DEP_TYPE (dep) = DEP_TYPE (new_dep);
1149       res = DEP_CHANGED;
1150     }
1151 
1152   if (current_sched_info->flags & USE_DEPS_LIST)
1153     /* Update DEP_STATUS.  */
1154     {
1155       ds_t dep_status = DEP_STATUS (dep);
1156       ds_t ds = DEP_STATUS (new_dep);
1157       ds_t new_status = ds | dep_status;
1158 
1159       if (new_status & SPECULATIVE)
1160 	{
1161 	  /* Either existing dep or a dep we're adding or both are
1162 	     speculative.  */
1163 	  if (!(ds & SPECULATIVE)
1164 	      || !(dep_status & SPECULATIVE))
1165 	    /* The new dep can't be speculative.  */
1166 	    new_status &= ~SPECULATIVE;
1167 	  else
1168 	    {
1169 	      /* Both are speculative.  Merge probabilities.  */
1170 	      if (mem1 != NULL)
1171 		{
1172 		  dw_t dw;
1173 
1174 		  dw = estimate_dep_weak (mem1, mem2);
1175 		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
1176 		}
1177 
1178 	      new_status = ds_merge (dep_status, ds);
1179 	    }
1180 	}
1181 
1182       ds = new_status;
1183 
1184       if (dep_status != ds)
1185 	{
1186 	  DEP_STATUS (dep) = ds;
1187 	  res = DEP_CHANGED;
1188 	}
1189     }
1190 
1191   if (was_spec && !dep_spec_p (dep))
1192     /* The old dep was speculative, but now it isn't.  */
1193     change_spec_dep_to_hard (sd_it);
1194 
1195   if (true_dependency_cache != NULL
1196       && res == DEP_CHANGED)
1197     update_dependency_caches (dep, old_type);
1198 
1199   return res;
1200 }
1201 
1202 /* Add or update  a dependence described by DEP.
1203    MEM1 and MEM2, if non-null, correspond to memory locations in case of
1204    data speculation.
1205 
1206    The function returns a value indicating if an old entry has been changed
1207    or a new entry has been added to insn's backward deps or nothing has
1208    been updated at all.  */
1209 static enum DEPS_ADJUST_RESULT
1210 add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1211 		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1212 {
1213   bool maybe_present_p = true;
1214   bool present_p = false;
1215 
1216   gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1217 	      && DEP_PRO (new_dep) != DEP_CON (new_dep));
1218 
1219   if (flag_checking)
1220     check_dep (new_dep, mem1 != NULL);
1221 
1222   if (true_dependency_cache != NULL)
1223     {
1224       switch (ask_dependency_caches (new_dep))
1225 	{
1226 	case DEP_PRESENT:
1227 	  dep_t present_dep;
1228 	  sd_iterator_def sd_it;
1229 
1230 	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1231 						      DEP_CON (new_dep),
1232 						      resolved_p, &sd_it);
1233 	  DEP_MULTIPLE (present_dep) = 1;
1234 	  return DEP_PRESENT;
1235 
1236 	case DEP_CHANGED:
1237 	  maybe_present_p = true;
1238 	  present_p = true;
1239 	  break;
1240 
1241 	case DEP_CREATED:
1242 	  maybe_present_p = false;
1243 	  present_p = false;
1244 	  break;
1245 
1246 	default:
1247 	  gcc_unreachable ();
1248 	  break;
1249 	}
1250     }
1251 
1252   /* Check that we don't already have this dependence.  */
1253   if (maybe_present_p)
1254     {
1255       dep_t present_dep;
1256       sd_iterator_def sd_it;
1257 
1258       gcc_assert (true_dependency_cache == NULL || present_p);
1259 
1260       present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1261 						  DEP_CON (new_dep),
1262 						  resolved_p, &sd_it);
1263 
1264       if (present_dep != NULL)
1265 	/* We found an existing dependency between ELEM and INSN.  */
1266 	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1267       else
1268 	/* We didn't find a dep, so it shouldn't be present in the cache.  */
1269 	gcc_assert (!present_p);
1270     }
1271 
1272   /* Might want to check one level of transitivity to save conses.
1273      This check should be done in maybe_add_or_update_dep_1.
1274      Since we made it to add_or_update_dep_1, we must create
1275      (or update) a link.  */
1276 
1277   if (mem1 != NULL_RTX)
1278     {
1279       gcc_assert (sched_deps_info->generate_spec_deps);
1280       DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1281 					   estimate_dep_weak (mem1, mem2));
1282     }
1283 
1284   sd_add_dep (new_dep, resolved_p);
1285 
1286   return DEP_CREATED;
1287 }
1288 
1289 /* Initialize BACK_LIST_PTR with consumer's backward list and
1290    FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
1291    initialize with lists that hold resolved deps.  */
1292 static void
1293 get_back_and_forw_lists (dep_t dep, bool resolved_p,
1294 			 deps_list_t *back_list_ptr,
1295 			 deps_list_t *forw_list_ptr)
1296 {
1297   rtx_insn *con = DEP_CON (dep);
1298 
1299   if (!resolved_p)
1300     {
1301       if (dep_spec_p (dep))
1302 	*back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1303       else
1304 	*back_list_ptr = INSN_HARD_BACK_DEPS (con);
1305 
1306       *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1307     }
1308   else
1309     {
1310       *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1311       *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1312     }
1313 }
1314 
1315 /* Add dependence described by DEP.
1316    If RESOLVED_P is true treat the dependence as a resolved one.  */
1317 void
1318 sd_add_dep (dep_t dep, bool resolved_p)
1319 {
1320   dep_node_t n = create_dep_node ();
1321   deps_list_t con_back_deps;
1322   deps_list_t pro_forw_deps;
1323   rtx_insn *elem = DEP_PRO (dep);
1324   rtx_insn *insn = DEP_CON (dep);
1325 
1326   gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1327 
1328   if ((current_sched_info->flags & DO_SPECULATION) == 0
1329       || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1330     DEP_STATUS (dep) &= ~SPECULATIVE;
1331 
1332   copy_dep (DEP_NODE_DEP (n), dep);
1333 
1334   get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1335 
1336   add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1337 
1338   if (flag_checking)
1339     check_dep (dep, false);
1340 
1341   add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1342 
1343   /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1344      in the bitmap caches of dependency information.  */
1345   if (true_dependency_cache != NULL)
1346     set_dependency_caches (dep);
1347 }
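/* A typical calling sequence (sketch; PRO and CON are assumed insns):
   the dep descriptor is built on the stack and copied by sd_add_dep
   into a freshly allocated dep_node, so the local copy can be discarded
   afterwards:

     dep_def _d, *d = &_d;

     init_dep (d, pro, con, REG_DEP_TRUE);
     sd_add_dep (d, false);
 */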
1348 
1349 /* Add or update a backward dependence described by DEP; if RESOLVED_P
1350    is true, treat it as a resolved dependence.
1351    This function is a convenience wrapper around add_or_update_dep_1.  */
1352 enum DEPS_ADJUST_RESULT
1353 sd_add_or_update_dep (dep_t dep, bool resolved_p)
1354 {
1355   return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1356 }
1357 
1358 /* Resolve the dependence pointed to by SD_IT.
1359    SD_IT will advance to the next element.  */
1360 void
1361 sd_resolve_dep (sd_iterator_def sd_it)
1362 {
1363   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1364   dep_t dep = DEP_NODE_DEP (node);
1365   rtx_insn *pro = DEP_PRO (dep);
1366   rtx_insn *con = DEP_CON (dep);
1367 
1368   if (dep_spec_p (dep))
1369     move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1370 		   INSN_RESOLVED_BACK_DEPS (con));
1371   else
1372     move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1373 		   INSN_RESOLVED_BACK_DEPS (con));
1374 
1375   move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1376 		 INSN_RESOLVED_FORW_DEPS (pro));
1377 }
1378 
1379 /* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
1380    pointed to by SD_IT to unresolved state.  */
1381 void
1382 sd_unresolve_dep (sd_iterator_def sd_it)
1383 {
1384   dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1385   dep_t dep = DEP_NODE_DEP (node);
1386   rtx_insn *pro = DEP_PRO (dep);
1387   rtx_insn *con = DEP_CON (dep);
1388 
1389   if (dep_spec_p (dep))
1390     move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1391 		   INSN_SPEC_BACK_DEPS (con));
1392   else
1393     move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
1394 		   INSN_HARD_BACK_DEPS (con));
1395 
1396   move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
1397 		 INSN_FORW_DEPS (pro));
1398 }
1399 
1400 /* Make TO depend on all the FROM's producers.
1401    If RESOLVED_P is true add dependencies to the resolved lists.  */
1402 void
1403 sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
1404 {
1405   sd_list_types_def list_type;
1406   sd_iterator_def sd_it;
1407   dep_t dep;
1408 
1409   list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1410 
1411   FOR_EACH_DEP (from, list_type, sd_it, dep)
1412     {
1413       dep_def _new_dep, *new_dep = &_new_dep;
1414 
1415       copy_dep (new_dep, dep);
1416       DEP_CON (new_dep) = to;
1417       sd_add_dep (new_dep, resolved_p);
1418     }
1419 }
1420 
1421 /* Remove a dependency referred to by SD_IT.
1422    SD_IT will point to the next dependence after removal.  */
1423 void
1424 sd_delete_dep (sd_iterator_def sd_it)
1425 {
1426   dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1427   dep_t dep = DEP_NODE_DEP (n);
1428   rtx_insn *pro = DEP_PRO (dep);
1429   rtx_insn *con = DEP_CON (dep);
1430   deps_list_t con_back_deps;
1431   deps_list_t pro_forw_deps;
1432 
1433   if (true_dependency_cache != NULL)
1434     {
1435       int elem_luid = INSN_LUID (pro);
1436       int insn_luid = INSN_LUID (con);
1437 
1438       bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1439       bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1440       bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
1441       bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1442 
1443       if (current_sched_info->flags & DO_SPECULATION)
1444 	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1445     }
1446 
1447   get_back_and_forw_lists (dep, sd_it.resolved_p,
1448 			   &con_back_deps, &pro_forw_deps);
1449 
1450   remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1451   remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1452 
1453   delete_dep_node (n);
1454 }
1455 
1456 /* Dump size of the lists.  */
1457 #define DUMP_LISTS_SIZE (2)
1458 
1459 /* Dump dependencies of the lists.  */
1460 #define DUMP_LISTS_DEPS (4)
1461 
1462 /* Dump all information about the lists.  */
1463 #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1464 
1465 /* Dump deps_lists of INSN specified by TYPES to DUMP.
1466    FLAGS is a bit mask specifying what information about the lists needs
1467    to be printed.
1468    If FLAGS has the very first bit set, then dump all information about
1469    the lists and propagate this bit into the callee dump functions.  */
1470 static void
1471 dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1472 {
1473   sd_iterator_def sd_it;
1474   dep_t dep;
1475   int all;
1476 
1477   all = (flags & 1);
1478 
1479   if (all)
1480     flags |= DUMP_LISTS_ALL;
1481 
1482   fprintf (dump, "[");
1483 
1484   if (flags & DUMP_LISTS_SIZE)
1485     fprintf (dump, "%d; ", sd_lists_size (insn, types));
1486 
1487   if (flags & DUMP_LISTS_DEPS)
1488     {
1489       FOR_EACH_DEP (insn, types, sd_it, dep)
1490 	{
1491 	  dump_dep (dump, dep, dump_dep_flags | all);
1492 	  fprintf (dump, " ");
1493 	}
1494     }
1495 }
1496 
1497 /* Dump all information about deps_lists of INSN specified by TYPES
1498    to STDERR.  */
1499 void
1500 sd_debug_lists (rtx insn, sd_list_types_def types)
1501 {
1502   dump_lists (stderr, insn, types, 1);
1503   fprintf (stderr, "\n");
1504 }
1505 
1506 /* A wrapper around add_dependence_1, to add a dependence of CON on
1507    PRO, with type DEP_TYPE.  This function implements special handling
1508    for REG_DEP_CONTROL dependencies.  For these, we optionally promote
1509    the type to REG_DEP_ANTI if we can determine that predication is
1510    impossible; otherwise we add additional true dependencies on the
1511    INSN_COND_DEPS list of the jump (which PRO must be).  */
1512 void
1513 add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
1514 {
1515   if (dep_type == REG_DEP_CONTROL
1516       && !(current_sched_info->flags & DO_PREDICATION))
1517     dep_type = REG_DEP_ANTI;
1518 
1519   /* A REG_DEP_CONTROL dependence may be eliminated through predication,
1520      so we must also make the insn dependent on the setter of the
1521      condition.  */
1522   if (dep_type == REG_DEP_CONTROL)
1523     {
1524       rtx_insn *real_pro = pro;
1525       rtx_insn *other = real_insn_for_shadow (real_pro);
1526       rtx cond;
1527 
1528       if (other != NULL_RTX)
1529 	real_pro = other;
1530       cond = sched_get_reverse_condition_uncached (real_pro);
1531       /* Verify that the insn does not use a different value in
1532 	 the condition register than the one that was present at
1533 	 the jump.  */
1534       if (cond == NULL_RTX)
1535 	dep_type = REG_DEP_ANTI;
1536       else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
1537 	{
1538 	  HARD_REG_SET uses;
1539 	  CLEAR_HARD_REG_SET (uses);
1540 	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
1541 	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
1542 	    dep_type = REG_DEP_ANTI;
1543 	}
1544       if (dep_type == REG_DEP_CONTROL)
1545 	{
1546 	  if (sched_verbose >= 5)
1547 	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
1548 		     INSN_UID (real_pro));
1549 	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
1550 			       REG_DEP_TRUE, false);
1551 	}
1552     }
1553 
1554   add_dependence_1 (con, pro, dep_type);
1555 }
1556 
1557 /* A convenience wrapper to operate on an entire list.  HARD should be
1558    true if DEP_NONREG should be set on newly created dependencies.  */
1559 
1560 static void
1561 add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
1562 		     enum reg_note dep_type, bool hard)
1563 {
1564   mark_as_hard = hard;
1565   for (; list; list = list->next ())
1566     {
1567       if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
1568 	add_dependence (insn, list->insn (), dep_type);
1569     }
1570   mark_as_hard = false;
1571 }
1572 
1573 /* Similar, but free *LISTP at the same time, when the context
1574    is not readonly.  HARD should be true if DEP_NONREG should be set on
1575    newly created dependencies.  */
1576 
1577 static void
1578 add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn,
1579 			      rtx_insn_list **listp,
1580                               int uncond, enum reg_note dep_type, bool hard)
1581 {
1582   add_dependence_list (insn, *listp, uncond, dep_type, hard);
1583 
1584   /* We don't want to short-circuit dependencies involving debug
1585      insns, because they may cause actual dependencies to be
1586      disregarded.  */
1587   if (deps->readonly || DEBUG_INSN_P (insn))
1588     return;
1589 
1590   free_INSN_LIST_list (listp);
1591 }
1592 
1593 /* Remove all occurrences of INSN from LIST.  Return the number of
1594    occurrences removed.  */
1595 
1596 static int
1597 remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
1598 {
1599   int removed = 0;
1600 
1601   while (*listp)
1602     {
1603       if ((*listp)->insn () == insn)
1604         {
1605           remove_free_INSN_LIST_node (listp);
1606           removed++;
1607           continue;
1608         }
1609 
1610       listp = (rtx_insn_list **)&XEXP (*listp, 1);
1611     }
1612 
1613   return removed;
1614 }
1615 
1616 /* Same as above, but process two lists at once.  */
1617 static int
1618 remove_from_both_dependence_lists (rtx_insn *insn,
1619 				   rtx_insn_list **listp,
1620 				   rtx_expr_list **exprp)
1621 {
1622   int removed = 0;
1623 
1624   while (*listp)
1625     {
1626       if (XEXP (*listp, 0) == insn)
1627         {
1628           remove_free_INSN_LIST_node (listp);
1629           remove_free_EXPR_LIST_node (exprp);
1630           removed++;
1631           continue;
1632         }
1633 
1634       listp = (rtx_insn_list **)&XEXP (*listp, 1);
1635       exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1636     }
1637 
1638   return removed;
1639 }
1640 
1641 /* Clear all dependencies for an insn.  */
1642 static void
1643 delete_all_dependences (rtx_insn *insn)
1644 {
1645   sd_iterator_def sd_it;
1646   dep_t dep;
1647 
1648   /* The loop below could be optimized to clear the caches and back_deps
1649      in one call, but that would duplicate code from
1650      sd_delete_dep ().  */
1651 
1652   for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1653        sd_iterator_cond (&sd_it, &dep);)
1654     sd_delete_dep (sd_it);
1655 }
1656 
1657 /* All insns in a scheduling group except the first should only have
1658    dependencies on the previous insn in the group.  So we find the
1659    first instruction in the scheduling group by walking the dependence
1660    chains backwards. Then we add the dependencies for the group to
1661    the previous nonnote insn.  */
1662 
1663 static void
1664 chain_to_prev_insn (rtx_insn *insn)
1665 {
1666   sd_iterator_def sd_it;
1667   dep_t dep;
1668   rtx_insn *prev_nonnote;
1669 
1670   FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1671     {
1672       rtx_insn *i = insn;
1673       rtx_insn *pro = DEP_PRO (dep);
1674 
1675       do
1676 	{
1677 	  i = prev_nonnote_insn (i);
1678 
1679 	  if (pro == i)
1680 	    goto next_link;
1681 	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1682 
1683       if (! sched_insns_conditions_mutex_p (i, pro))
1684 	add_dependence (i, pro, DEP_TYPE (dep));
1685     next_link:;
1686     }
1687 
1688   delete_all_dependences (insn);
1689 
1690   prev_nonnote = prev_nonnote_nondebug_insn (insn);
1691   if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1692       && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1693     add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1694 }
1695 
1696 /* Process an insn's memory dependencies.  There are four kinds of
1697    dependencies:
1698 
1699    (0) read dependence: read follows read
1700    (1) true dependence: read follows write
1701    (2) output dependence: write follows write
1702    (3) anti dependence: write follows read
1703 
1704    We are careful to build only dependencies which actually exist, and
1705    use transitivity to avoid building too many links.  */
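
/* As an illustration with hypothetical RTL (not from any particular
   target), for a memory location X accessed by an earlier insn A and a
   later insn B:

     A: (set (mem:SI X) (reg:SI 1))  B: (set (reg:SI 2) (mem:SI X))
	-- true dependence (read follows write)
     A: (set (mem:SI X) (reg:SI 1))  B: (set (mem:SI X) (reg:SI 2))
	-- output dependence (write follows write)
     A: (set (reg:SI 1) (mem:SI X))  B: (set (mem:SI X) (reg:SI 2))
	-- anti dependence (write follows read)
     A: (set (reg:SI 1) (mem:SI X))  B: (set (reg:SI 2) (mem:SI X))
	-- read dependence (read follows read)  */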
1706 
1707 /* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1708    The MEM is a memory reference contained within INSN, which we are saving
1709    so that we can do memory aliasing on it.  */
1710 
1711 static void
1712 add_insn_mem_dependence (class deps_desc *deps, bool read_p,
1713 			 rtx_insn *insn, rtx mem)
1714 {
1715   rtx_insn_list **insn_list;
1716   rtx_insn_list *insn_node;
1717   rtx_expr_list **mem_list;
1718   rtx_expr_list *mem_node;
1719 
1720   gcc_assert (!deps->readonly);
1721   if (read_p)
1722     {
1723       insn_list = &deps->pending_read_insns;
1724       mem_list = &deps->pending_read_mems;
1725       if (!DEBUG_INSN_P (insn))
1726 	deps->pending_read_list_length++;
1727     }
1728   else
1729     {
1730       insn_list = &deps->pending_write_insns;
1731       mem_list = &deps->pending_write_mems;
1732       deps->pending_write_list_length++;
1733     }
1734 
1735   insn_node = alloc_INSN_LIST (insn, *insn_list);
1736   *insn_list = insn_node;
1737 
1738   if (sched_deps_info->use_cselib)
1739     {
1740       mem = shallow_copy_rtx (mem);
1741       XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1742 							GET_MODE (mem), insn);
1743     }
1744   mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1745   *mem_list = mem_node;
1746 }
1747 
1748 /* Make a dependency between every memory reference on the pending lists
1749    and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1750    dependencies for a read operation, similarly with FOR_WRITE.  */
1751 
1752 static void
1753 flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read,
1754 		     int for_write)
1755 {
1756   if (for_write)
1757     {
1758       add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1759                                     1, REG_DEP_ANTI, true);
1760       if (!deps->readonly)
1761         {
1762           free_EXPR_LIST_list (&deps->pending_read_mems);
1763           deps->pending_read_list_length = 0;
1764         }
1765     }
1766 
1767   add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1768 				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1769 				true);
1770 
1771   add_dependence_list_and_free (deps, insn,
1772                                 &deps->last_pending_memory_flush, 1,
1773                                 for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1774 				true);
1775 
1776   add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
1777 				REG_DEP_ANTI, true);
1778 
1779   if (DEBUG_INSN_P (insn))
1780     {
1781       if (for_write)
1782 	free_INSN_LIST_list (&deps->pending_read_insns);
1783       free_INSN_LIST_list (&deps->pending_write_insns);
1784       free_INSN_LIST_list (&deps->last_pending_memory_flush);
1785       free_INSN_LIST_list (&deps->pending_jump_insns);
1786     }
1787 
1788   if (!deps->readonly)
1789     {
1790       free_EXPR_LIST_list (&deps->pending_write_mems);
1791       deps->pending_write_list_length = 0;
1792 
1793       deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1794       deps->pending_flush_length = 1;
1795     }
1796   mark_as_hard = false;
1797 }
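
/* As a rough usage sketch (not exhaustive): the analysis routines below
   call flush_pending_lists when the pending lists would otherwise grow
   past param_max_pending_list_length, and for insns that must not be
   moved across any memory access at all, e.g.

     flush_pending_lists (deps, insn, true, true);  /- TRAP_IF, UNSPEC_VOLATILE -/

   which turns every pending memory reference into a dependence on INSN.  */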
1798 
1799 /* The instruction whose dependencies we are analyzing.  */
1800 static rtx_insn *cur_insn = NULL;
1801 
1802 /* Implement hooks for haifa scheduler.  */
1803 
1804 static void
1805 haifa_start_insn (rtx_insn *insn)
1806 {
1807   gcc_assert (insn && !cur_insn);
1808 
1809   cur_insn = insn;
1810 }
1811 
1812 static void
1813 haifa_finish_insn (void)
1814 {
1815   cur_insn = NULL;
1816 }
1817 
1818 void
1819 haifa_note_reg_set (int regno)
1820 {
1821   SET_REGNO_REG_SET (reg_pending_sets, regno);
1822 }
1823 
1824 void
1825 haifa_note_reg_clobber (int regno)
1826 {
1827   SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1828 }
1829 
1830 void
1831 haifa_note_reg_use (int regno)
1832 {
1833   SET_REGNO_REG_SET (reg_pending_uses, regno);
1834 }
1835 
1836 static void
1837 haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1838 {
1839   if (!(ds & SPECULATIVE))
1840     {
1841       mem = NULL_RTX;
1842       pending_mem = NULL_RTX;
1843     }
1844   else
1845     gcc_assert (ds & BEGIN_DATA);
1846 
1847   {
1848     dep_def _dep, *dep = &_dep;
1849 
1850     init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1851                 current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1852     DEP_NONREG (dep) = 1;
1853     maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1854   }
1855 
1856 }
1857 
1858 static void
1859 haifa_note_dep (rtx_insn *elem, ds_t ds)
1860 {
1861   dep_def _dep;
1862   dep_t dep = &_dep;
1863 
1864   init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1865   if (mark_as_hard)
1866     DEP_NONREG (dep) = 1;
1867   maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1868 }
1869 
1870 static void
1871 note_reg_use (int r)
1872 {
1873   if (sched_deps_info->note_reg_use)
1874     sched_deps_info->note_reg_use (r);
1875 }
1876 
1877 static void
1878 note_reg_set (int r)
1879 {
1880   if (sched_deps_info->note_reg_set)
1881     sched_deps_info->note_reg_set (r);
1882 }
1883 
1884 static void
1885 note_reg_clobber (int r)
1886 {
1887   if (sched_deps_info->note_reg_clobber)
1888     sched_deps_info->note_reg_clobber (r);
1889 }
1890 
1891 static void
1892 note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1893 {
1894   if (sched_deps_info->note_mem_dep)
1895     sched_deps_info->note_mem_dep (m1, m2, e, ds);
1896 }
1897 
1898 static void
1899 note_dep (rtx_insn *e, ds_t ds)
1900 {
1901   if (sched_deps_info->note_dep)
1902     sched_deps_info->note_dep (e, ds);
1903 }
1904 
1905 /* Return the reg_note corresponding to DS.  */
1906 enum reg_note
1907 ds_to_dt (ds_t ds)
1908 {
1909   if (ds & DEP_TRUE)
1910     return REG_DEP_TRUE;
1911   else if (ds & DEP_OUTPUT)
1912     return REG_DEP_OUTPUT;
1913   else if (ds & DEP_ANTI)
1914     return REG_DEP_ANTI;
1915   else
1916     {
1917       gcc_assert (ds & DEP_CONTROL);
1918       return REG_DEP_CONTROL;
1919     }
1920 }
1921 
1922 
1923 
1924 /* Functions for computation of info needed for register pressure
1925    sensitive insn scheduling.  */
1926 
1927 
1928 /* Allocate and return reg_use_data structure for REGNO and INSN.  */
1929 static struct reg_use_data *
1930 create_insn_reg_use (int regno, rtx_insn *insn)
1931 {
1932   struct reg_use_data *use;
1933 
1934   use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1935   use->regno = regno;
1936   use->insn = insn;
1937   use->next_insn_use = INSN_REG_USE_LIST (insn);
1938   INSN_REG_USE_LIST (insn) = use;
1939   return use;
1940 }
1941 
1942 /* Allocate reg_set_data structure for REGNO and INSN.  */
1943 static void
1944 create_insn_reg_set (int regno, rtx insn)
1945 {
1946   struct reg_set_data *set;
1947 
1948   set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1949   set->regno = regno;
1950   set->insn = insn;
1951   set->next_insn_set = INSN_REG_SET_LIST (insn);
1952   INSN_REG_SET_LIST (insn) = set;
1953 }
1954 
1955 /* Set up insn register uses for INSN and dependency context DEPS.  */
1956 static void
1957 setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn)
1958 {
1959   unsigned i;
1960   reg_set_iterator rsi;
1961   struct reg_use_data *use, *use2, *next;
1962   struct deps_reg *reg_last;
1963 
1964   EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1965     {
1966       if (i < FIRST_PSEUDO_REGISTER
1967 	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1968 	continue;
1969 
1970       if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1971 	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
1972 	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1973 	/* Ignore a use which is not dying.  */
1974 	continue;
1975 
1976       use = create_insn_reg_use (i, insn);
1977       use->next_regno_use = use;
1978       reg_last = &deps->reg_last[i];
1979 
1980       /* Create the cycle list of uses.  */
1981       for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
1982 	{
1983 	  use2 = create_insn_reg_use (i, list->insn ());
1984 	  next = use->next_regno_use;
1985 	  use->next_regno_use = use2;
1986 	  use2->next_regno_use = next;
1987 	}
1988     }
1989 }
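
/* A hedged illustration of the ring built above (hypothetical insns): if
   the current insn I0 uses a dying regno R that reg_last->uses also records
   for older insns I1 and I2, the next_regno_use links form a cycle such as

     use(I0) -> use(I2) -> use(I1) -> use(I0) -> ...

   (the exact order of the older uses depends on the order of the list).  */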
1990 
1991 /* Register pressure info for the currently processed insn.  */
1992 static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1993 
1994 /* Return TRUE if INSN has the use structure for REGNO.  */
1995 static bool
1996 insn_use_p (rtx insn, int regno)
1997 {
1998   struct reg_use_data *use;
1999 
2000   for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2001     if (use->regno == regno)
2002       return true;
2003   return false;
2004 }
2005 
2006 /* Update the register pressure info after birth of pseudo register REGNO
2007    in INSN.  Arguments CLOBBER_P and UNUSED_P say, respectively, whether
2008    the register is clobbered or unused after the insn.  */
2009 static void
2010 mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2011 {
2012   int incr, new_incr;
2013   enum reg_class cl;
2014 
2015   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2016   cl = sched_regno_pressure_class[regno];
2017   if (cl != NO_REGS)
2018     {
2019       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2020       if (clobber_p)
2021 	{
2022 	  new_incr = reg_pressure_info[cl].clobber_increase + incr;
2023 	  reg_pressure_info[cl].clobber_increase = new_incr;
2024 	}
2025       else if (unused_p)
2026 	{
2027 	  new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2028 	  reg_pressure_info[cl].unused_set_increase = new_incr;
2029 	}
2030       else
2031 	{
2032 	  new_incr = reg_pressure_info[cl].set_increase + incr;
2033 	  reg_pressure_info[cl].set_increase = new_incr;
2034 	  if (! insn_use_p (insn, regno))
2035 	    reg_pressure_info[cl].change += incr;
2036 	  create_insn_reg_set (regno, insn);
2037 	}
2038       gcc_assert (new_incr < (1 << INCREASE_BITS));
2039     }
2040 }
2041 
2042 /* Like mark_insn_pseudo_birth except that NREGS says how many
2043    hard registers are involved in the birth.  */
2044 static void
2045 mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2046 			    bool clobber_p, bool unused_p)
2047 {
2048   enum reg_class cl;
2049   int new_incr, last = regno + nregs;
2050 
2051   while (regno < last)
2052     {
2053       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2054       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2055 	{
2056 	  cl = sched_regno_pressure_class[regno];
2057 	  if (cl != NO_REGS)
2058 	    {
2059 	      if (clobber_p)
2060 		{
2061 		  new_incr = reg_pressure_info[cl].clobber_increase + 1;
2062 		  reg_pressure_info[cl].clobber_increase = new_incr;
2063 		}
2064 	      else if (unused_p)
2065 		{
2066 		  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2067 		  reg_pressure_info[cl].unused_set_increase = new_incr;
2068 		}
2069 	      else
2070 		{
2071 		  new_incr = reg_pressure_info[cl].set_increase + 1;
2072 		  reg_pressure_info[cl].set_increase = new_incr;
2073 		  if (! insn_use_p (insn, regno))
2074 		    reg_pressure_info[cl].change += 1;
2075 		  create_insn_reg_set (regno, insn);
2076 		}
2077 	      gcc_assert (new_incr < (1 << INCREASE_BITS));
2078 	    }
2079 	}
2080       regno++;
2081     }
2082 }
2083 
2084 /* Update the register pressure info after birth of pseudo or hard
2085    register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say,
2086    respectively, whether the register is clobbered or unused after the
2087    insn.  */
2088 static void
2089 mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2090 {
2091   int regno;
2092 
2093   if (GET_CODE (reg) == SUBREG)
2094     reg = SUBREG_REG (reg);
2095 
2096   if (! REG_P (reg))
2097     return;
2098 
2099   regno = REGNO (reg);
2100   if (regno < FIRST_PSEUDO_REGISTER)
2101     mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2102 				clobber_p, unused_p);
2103   else
2104     mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2105 }
2106 
2107 /* Update the register pressure info after death of pseudo register
2108    REGNO.  */
2109 static void
2110 mark_pseudo_death (int regno)
2111 {
2112   int incr;
2113   enum reg_class cl;
2114 
2115   gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2116   cl = sched_regno_pressure_class[regno];
2117   if (cl != NO_REGS)
2118     {
2119       incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2120       reg_pressure_info[cl].change -= incr;
2121     }
2122 }
2123 
2124 /* Like mark_pseudo_death except that NREGS says how many hard
2125    registers are involved in the death.  */
2126 static void
2127 mark_hard_regno_death (int regno, int nregs)
2128 {
2129   enum reg_class cl;
2130   int last = regno + nregs;
2131 
2132   while (regno < last)
2133     {
2134       gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2135       if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2136 	{
2137 	  cl = sched_regno_pressure_class[regno];
2138 	  if (cl != NO_REGS)
2139 	    reg_pressure_info[cl].change -= 1;
2140 	}
2141       regno++;
2142     }
2143 }
2144 
2145 /* Update the register pressure info after death of pseudo or hard
2146    register REG.  */
2147 static void
2148 mark_reg_death (rtx reg)
2149 {
2150   int regno;
2151 
2152   if (GET_CODE (reg) == SUBREG)
2153     reg = SUBREG_REG (reg);
2154 
2155   if (! REG_P (reg))
2156     return;
2157 
2158   regno = REGNO (reg);
2159   if (regno < FIRST_PSEUDO_REGISTER)
2160     mark_hard_regno_death (regno, REG_NREGS (reg));
2161   else
2162     mark_pseudo_death (regno);
2163 }
2164 
2165 /* Process SETTER of REG.  DATA is an insn containing the setter.  */
2166 static void
2167 mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2168 {
2169   if (setter != NULL_RTX && GET_CODE (setter) != SET)
2170     return;
2171   mark_insn_reg_birth
2172     ((rtx) data, reg, false,
2173      find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2174 }
2175 
2176 /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
2177 static void
2178 mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2179 {
2180   if (GET_CODE (setter) == CLOBBER)
2181     mark_insn_reg_birth ((rtx) data, reg, true, false);
2182 }
2183 
2184 /* Set up reg pressure info related to INSN.  */
2185 void
2186 init_insn_reg_pressure_info (rtx_insn *insn)
2187 {
2188   int i, len;
2189   enum reg_class cl;
2190   static struct reg_pressure_data *pressure_info;
2191   rtx link;
2192 
2193   gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);
2194 
2195   if (! INSN_P (insn))
2196     return;
2197 
2198   for (i = 0; i < ira_pressure_classes_num; i++)
2199     {
2200       cl = ira_pressure_classes[i];
2201       reg_pressure_info[cl].clobber_increase = 0;
2202       reg_pressure_info[cl].set_increase = 0;
2203       reg_pressure_info[cl].unused_set_increase = 0;
2204       reg_pressure_info[cl].change = 0;
2205     }
2206 
2207   note_stores (insn, mark_insn_reg_clobber, insn);
2208 
2209   note_stores (insn, mark_insn_reg_store, insn);
2210 
2211   if (AUTO_INC_DEC)
2212     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2213       if (REG_NOTE_KIND (link) == REG_INC)
2214 	mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2215 
2216   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2217     if (REG_NOTE_KIND (link) == REG_DEAD)
2218       mark_reg_death (XEXP (link, 0));
2219 
2220   len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
2221   pressure_info
2222     = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2223   if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2224     INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
2225 						    * sizeof (int), 1);
2226   for (i = 0; i < ira_pressure_classes_num; i++)
2227     {
2228       cl = ira_pressure_classes[i];
2229       pressure_info[i].clobber_increase
2230 	= reg_pressure_info[cl].clobber_increase;
2231       pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2232       pressure_info[i].unused_set_increase
2233 	= reg_pressure_info[cl].unused_set_increase;
2234       pressure_info[i].change = reg_pressure_info[cl].change;
2235     }
2236 }
2237 
2238 
2239 
2240 
2241 /* Internal variable for sched_analyze_[12] () functions.
2242    If it is nonzero, it means that sched_analyze_[12] is looking
2243    at the outermost SET.  */
2244 static bool can_start_lhs_rhs_p;
2245 
2246 /* Extend reg info for the deps context DEPS given that
2247    we have just generated a register numbered REGNO.  */
2248 static void
2249 extend_deps_reg_info (class deps_desc *deps, int regno)
2250 {
2251   int max_regno = regno + 1;
2252 
2253   gcc_assert (!reload_completed);
2254 
2255   /* In a readonly context, it would not hurt to extend info,
2256      but it should not be needed.  */
2257   if (reload_completed && deps->readonly)
2258     {
2259       deps->max_reg = max_regno;
2260       return;
2261     }
2262 
2263   if (max_regno > deps->max_reg)
2264     {
2265       deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2266                                    max_regno);
2267       memset (&deps->reg_last[deps->max_reg],
2268               0, (max_regno - deps->max_reg)
2269               * sizeof (struct deps_reg));
2270       deps->max_reg = max_regno;
2271     }
2272 }
2273 
2274 /* Extends REG_INFO_P if needed.  */
2275 void
2276 maybe_extend_reg_info_p (void)
2277 {
2278   /* Extend REG_INFO_P, if needed.  */
2279   if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2280     {
2281       size_t new_reg_info_p_size = max_regno + 128;
2282 
2283       gcc_assert (!reload_completed && sel_sched_p ());
2284 
2285       reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2286                                                     new_reg_info_p_size,
2287                                                     reg_info_p_size,
2288                                                     sizeof (*reg_info_p));
2289       reg_info_p_size = new_reg_info_p_size;
2290     }
2291 }
2292 
2293 /* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2294    The type of the reference is specified by REF and can be SET,
2295    CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
2296 
2297 static void
2298 sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode,
2299 		   enum rtx_code ref, rtx_insn *insn)
2300 {
2301   /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2302   if (!reload_completed && sel_sched_p ()
2303       && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2304     extend_deps_reg_info (deps, regno);
2305 
2306   maybe_extend_reg_info_p ();
2307 
2308   /* A hard reg in a wide mode may really be multiple registers.
2309      If so, mark all of them just like the first.  */
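  /* For example (hypothetical target layout): a DImode value in hard reg 3
     on a 32-bit target would occupy hard regs 3 and 4, and both get noted
     below.  */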
2310   if (regno < FIRST_PSEUDO_REGISTER)
2311     {
2312       int i = hard_regno_nregs (regno, mode);
2313       if (ref == SET)
2314 	{
2315 	  while (--i >= 0)
2316 	    note_reg_set (regno + i);
2317 	}
2318       else if (ref == USE)
2319 	{
2320 	  while (--i >= 0)
2321 	    note_reg_use (regno + i);
2322 	}
2323       else
2324 	{
2325 	  while (--i >= 0)
2326 	    note_reg_clobber (regno + i);
2327 	}
2328     }
2329 
2330   /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2331      it does not reload.  Ignore these as they have served their
2332      purpose already.  */
2333   else if (regno >= deps->max_reg)
2334     {
2335       enum rtx_code code = GET_CODE (PATTERN (insn));
2336       gcc_assert (code == USE || code == CLOBBER);
2337     }
2338 
2339   else
2340     {
2341       if (ref == SET)
2342 	note_reg_set (regno);
2343       else if (ref == USE)
2344 	note_reg_use (regno);
2345       else
2346 	note_reg_clobber (regno);
2347 
2348       /* Pseudos that are REG_EQUIV to something may be replaced
2349 	 by that during reloading.  We need only add dependencies for
2350 	the address in the REG_EQUIV note.  */
2351       if (!reload_completed && get_reg_known_equiv_p (regno))
2352 	{
2353 	  rtx t = get_reg_known_value (regno);
2354 	  if (MEM_P (t))
2355 	    sched_analyze_2 (deps, XEXP (t, 0), insn);
2356 	}
2357 
2358       /* Don't let it cross a call after scheduling if it doesn't
2359 	 already cross one.  */
2360       if (REG_N_CALLS_CROSSED (regno) == 0)
2361 	{
2362 	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2363 	    deps->sched_before_next_call
2364 	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2365 	  else
2366 	    add_dependence_list (insn, deps->last_function_call, 1,
2367 				 REG_DEP_ANTI, false);
2368 	}
2369     }
2370 }
2371 
2372 /* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2373    rtx, X, creating all dependencies generated by the write to the
2374    destination of X, and reads of everything mentioned.  */
2375 
2376 static void
2377 sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
2378 {
2379   rtx dest = XEXP (x, 0);
2380   enum rtx_code code = GET_CODE (x);
2381   bool cslr_p = can_start_lhs_rhs_p;
2382 
2383   can_start_lhs_rhs_p = false;
2384 
2385   gcc_assert (dest);
2386   if (dest == 0)
2387     return;
2388 
2389   if (cslr_p && sched_deps_info->start_lhs)
2390     sched_deps_info->start_lhs (dest);
2391 
2392   if (GET_CODE (dest) == PARALLEL)
2393     {
2394       int i;
2395 
2396       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2397 	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2398 	  sched_analyze_1 (deps,
2399 			   gen_rtx_CLOBBER (VOIDmode,
2400 					    XEXP (XVECEXP (dest, 0, i), 0)),
2401 			   insn);
2402 
2403       if (cslr_p && sched_deps_info->finish_lhs)
2404 	sched_deps_info->finish_lhs ();
2405 
2406       if (code == SET)
2407 	{
2408 	  can_start_lhs_rhs_p = cslr_p;
2409 
2410 	  sched_analyze_2 (deps, SET_SRC (x), insn);
2411 
2412 	  can_start_lhs_rhs_p = false;
2413 	}
2414 
2415       return;
2416     }
2417 
2418   while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2419 	 || GET_CODE (dest) == ZERO_EXTRACT)
2420     {
2421       if (GET_CODE (dest) == STRICT_LOW_PART
2422 	 || GET_CODE (dest) == ZERO_EXTRACT
2423 	 || read_modify_subreg_p (dest))
2424         {
2425 	  /* These both read and modify the result.  We must handle
2426              them as writes to get proper dependencies for following
2427              instructions.  We must handle them as reads to get proper
2428              dependencies from this to previous instructions.
2429              Thus we need to call sched_analyze_2.  */
2430 
2431 	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
2432 	}
2433       if (GET_CODE (dest) == ZERO_EXTRACT)
2434 	{
2435 	  /* The second and third arguments are values read by this insn.  */
2436 	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
2437 	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
2438 	}
2439       dest = XEXP (dest, 0);
2440     }
2441 
2442   if (REG_P (dest))
2443     {
2444       int regno = REGNO (dest);
2445       machine_mode mode = GET_MODE (dest);
2446 
2447       sched_analyze_reg (deps, regno, mode, code, insn);
2448 
2449 #ifdef STACK_REGS
2450       /* Treat all writes to a stack register as modifying the TOS.  */
2451       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2452 	{
2453 	  /* Avoid analyzing the same register twice.  */
2454 	  if (regno != FIRST_STACK_REG)
2455 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2456 
2457 	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
2458 			       FIRST_STACK_REG);
2459 	}
2460 #endif
2461     }
2462   else if (MEM_P (dest))
2463     {
2464       /* Writing memory.  */
2465       rtx t = dest;
2466 
2467       if (sched_deps_info->use_cselib)
2468 	{
2469 	  machine_mode address_mode = get_address_mode (dest);
2470 
2471 	  t = shallow_copy_rtx (dest);
2472 	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2473 				   GET_MODE (t), insn);
2474 	  XEXP (t, 0)
2475 	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2476 						insn);
2477 	}
2478       t = canon_rtx (t);
2479 
2480       /* Pending lists can't get larger with a readonly context.  */
2481       if (!deps->readonly
2482           && ((deps->pending_read_list_length + deps->pending_write_list_length)
2483 	      >= param_max_pending_list_length))
2484 	{
2485 	  /* Flush all pending reads and writes to prevent the pending lists
2486 	     from getting any larger.  Insn scheduling runs too slowly when
2487 	     these lists get long.  When compiling GCC with itself,
2488 	     this flush occurs 8 times for sparc, and 10 times for m88k using
2489 	     the default value of 32.  */
2490 	  flush_pending_lists (deps, insn, false, true);
2491 	}
2492       else
2493 	{
2494 	  rtx_insn_list *pending;
2495 	  rtx_expr_list *pending_mem;
2496 
2497 	  pending = deps->pending_read_insns;
2498 	  pending_mem = deps->pending_read_mems;
2499 	  while (pending)
2500 	    {
2501 	      if (anti_dependence (pending_mem->element (), t)
2502 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2503 		note_mem_dep (t, pending_mem->element (), pending->insn (),
2504 			      DEP_ANTI);
2505 
2506 	      pending = pending->next ();
2507 	      pending_mem = pending_mem->next ();
2508 	    }
2509 
2510 	  pending = deps->pending_write_insns;
2511 	  pending_mem = deps->pending_write_mems;
2512 	  while (pending)
2513 	    {
2514 	      if (output_dependence (pending_mem->element (), t)
2515 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
2516 		note_mem_dep (t, pending_mem->element (),
2517 			      pending->insn (),
2518 			      DEP_OUTPUT);
2519 
2520 	      pending = pending->next ();
2521 	      pending_mem = pending_mem->next ();
2522 	    }
2523 
2524 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2525 			       REG_DEP_ANTI, true);
2526 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
2527 			       REG_DEP_CONTROL, true);
2528 
2529           if (!deps->readonly)
2530             add_insn_mem_dependence (deps, false, insn, dest);
2531 	}
2532       sched_analyze_2 (deps, XEXP (dest, 0), insn);
2533     }
2534 
2535   if (cslr_p && sched_deps_info->finish_lhs)
2536     sched_deps_info->finish_lhs ();
2537 
2538   /* Analyze reads.  */
2539   if (GET_CODE (x) == SET)
2540     {
2541       can_start_lhs_rhs_p = cslr_p;
2542 
2543       sched_analyze_2 (deps, SET_SRC (x), insn);
2544 
2545       can_start_lhs_rhs_p = false;
2546     }
2547 }
2548 
2549 /* Analyze the uses of memory and registers in rtx X in INSN.  */
2550 static void
2551 sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
2552 {
2553   int i;
2554   int j;
2555   enum rtx_code code;
2556   const char *fmt;
2557   bool cslr_p = can_start_lhs_rhs_p;
2558 
2559   can_start_lhs_rhs_p = false;
2560 
2561   gcc_assert (x);
2562   if (x == 0)
2563     return;
2564 
2565   if (cslr_p && sched_deps_info->start_rhs)
2566     sched_deps_info->start_rhs (x);
2567 
2568   code = GET_CODE (x);
2569 
2570   switch (code)
2571     {
2572     CASE_CONST_ANY:
2573     case SYMBOL_REF:
2574     case CONST:
2575     case LABEL_REF:
2576       /* Ignore constants.  */
2577       if (cslr_p && sched_deps_info->finish_rhs)
2578 	sched_deps_info->finish_rhs ();
2579 
2580       return;
2581 
2582     case REG:
2583       {
2584 	int regno = REGNO (x);
2585 	machine_mode mode = GET_MODE (x);
2586 
2587 	sched_analyze_reg (deps, regno, mode, USE, insn);
2588 
2589 #ifdef STACK_REGS
2590       /* Treat all reads of a stack register as modifying the TOS.  */
2591       if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2592 	{
2593 	  /* Avoid analyzing the same register twice.  */
2594 	  if (regno != FIRST_STACK_REG)
2595 	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2596 	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2597 	}
2598 #endif
2599 
2600 	if (cslr_p && sched_deps_info->finish_rhs)
2601 	  sched_deps_info->finish_rhs ();
2602 
2603 	return;
2604       }
2605 
2606     case MEM:
2607       {
2608 	/* Reading memory.  */
2609 	rtx_insn_list *u;
2610 	rtx_insn_list *pending;
2611 	rtx_expr_list *pending_mem;
2612 	rtx t = x;
2613 
2614 	if (sched_deps_info->use_cselib)
2615 	  {
2616 	    machine_mode address_mode = get_address_mode (t);
2617 
2618 	    t = shallow_copy_rtx (t);
2619 	    cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
2620 				     GET_MODE (t), insn);
2621 	    XEXP (t, 0)
2622 	      = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
2623 						  insn);
2624 	  }
2625 
2626 	if (!DEBUG_INSN_P (insn))
2627 	  {
2628 	    t = canon_rtx (t);
2629 	    pending = deps->pending_read_insns;
2630 	    pending_mem = deps->pending_read_mems;
2631 	    while (pending)
2632 	      {
2633 		if (read_dependence (pending_mem->element (), t)
2634 		    && ! sched_insns_conditions_mutex_p (insn,
2635 							 pending->insn ()))
2636 		  note_mem_dep (t, pending_mem->element (),
2637 				pending->insn (),
2638 				DEP_ANTI);
2639 
2640 		pending = pending->next ();
2641 		pending_mem = pending_mem->next ();
2642 	      }
2643 
2644 	    pending = deps->pending_write_insns;
2645 	    pending_mem = deps->pending_write_mems;
2646 	    while (pending)
2647 	      {
2648 		if (true_dependence (pending_mem->element (), VOIDmode, t)
2649 		    && ! sched_insns_conditions_mutex_p (insn,
2650 							 pending->insn ()))
2651 		  note_mem_dep (t, pending_mem->element (),
2652 				pending->insn (),
2653 				sched_deps_info->generate_spec_deps
2654 				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2655 
2656 		pending = pending->next ();
2657 		pending_mem = pending_mem->next ();
2658 	      }
2659 
2660 	    for (u = deps->last_pending_memory_flush; u; u = u->next ())
2661 	      add_dependence (insn, u->insn (), REG_DEP_ANTI);
2662 
2663 	    for (u = deps->pending_jump_insns; u; u = u->next ())
2664 	      if (deps_may_trap_p (x))
2665 		{
2666 		  if ((sched_deps_info->generate_spec_deps)
2667 		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2668 		    {
2669 		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2670 					      MAX_DEP_WEAK);
2671 
2672 		      note_dep (u->insn (), ds);
2673 		    }
2674 		  else
2675 		    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
2676 		}
2677 	  }
2678 
2679 	/* Always add these dependencies to pending_reads, since
2680 	   this insn may be followed by a write.  */
2681 	if (!deps->readonly)
2682 	  {
2683 	    if ((deps->pending_read_list_length
2684 		 + deps->pending_write_list_length)
2685 		>= param_max_pending_list_length
2686 		&& !DEBUG_INSN_P (insn))
2687 	      flush_pending_lists (deps, insn, true, true);
2688 	    add_insn_mem_dependence (deps, true, insn, x);
2689 	  }
2690 
2691 	sched_analyze_2 (deps, XEXP (x, 0), insn);
2692 
2693 	if (cslr_p && sched_deps_info->finish_rhs)
2694 	  sched_deps_info->finish_rhs ();
2695 
2696 	return;
2697       }
2698 
2699     /* Force pending stores to memory in case a trap handler needs them.
2700        Also force pending loads from memory; loads and stores can segfault
2701        and the signal handler won't be triggered if the trap insn was moved
2702        above a load or store insn.  */
2703     case TRAP_IF:
2704       flush_pending_lists (deps, insn, true, true);
2705       break;
2706 
2707     case PREFETCH:
2708       if (PREFETCH_SCHEDULE_BARRIER_P (x))
2709 	reg_pending_barrier = TRUE_BARRIER;
2710       /* A prefetch insn contains addresses only.  So if the prefetch
2711 	 address has no registers, there will be no dependencies on
2712 	 the prefetch insn.  This is wrong from the correctness point of
2713 	 view, as such a prefetch can be moved below a jump insn, which
2714 	 usually generates a MOVE_BARRIER that prevents insns containing
2715 	 registers or memories from moving through the barrier.  It is
2716 	 also wrong from the performance point of view, as a prefetch
2717 	 without dependencies will tend to be issued later instead of
2718 	 earlier.  It is hard to generate accurate dependencies for
2719 	 prefetch insns, as a prefetch has only the start address, but it
2720 	 is better to have something than nothing.  */
2722       if (!deps->readonly)
2723 	{
2724 	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
2725 	  if (sched_deps_info->use_cselib)
2726 	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
2727 	  add_insn_mem_dependence (deps, true, insn, x);
2728 	}
2729       break;
2730 
2731     case UNSPEC_VOLATILE:
2732       flush_pending_lists (deps, insn, true, true);
2733       /* FALLTHRU */
2734 
2735     case ASM_OPERANDS:
2736     case ASM_INPUT:
2737       {
2738 	/* Traditional and volatile asm instructions must be considered to use
2739 	   and clobber all hard registers, all pseudo-registers and all of
2740 	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2741 
2742 	   Consider for instance a volatile asm that changes the fpu rounding
2743 	   mode.  An insn should not be moved across this even if it only uses
2744 	   pseudo-regs because it might give an incorrectly rounded result.  */
2745 	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2746 	    && !DEBUG_INSN_P (insn))
2747 	  reg_pending_barrier = TRUE_BARRIER;
2748 
2749 	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
2750 	   We cannot just fall through here since then we would be confused
2751 	   by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2752 	   a traditional asm, unlike its normal usage.  */
2753 
2754 	if (code == ASM_OPERANDS)
2755 	  {
2756 	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2757 	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2758 
2759 	    if (cslr_p && sched_deps_info->finish_rhs)
2760 	      sched_deps_info->finish_rhs ();
2761 
2762 	    return;
2763 	  }
2764 	break;
2765       }
2766 
2767     case PRE_DEC:
2768     case POST_DEC:
2769     case PRE_INC:
2770     case POST_INC:
2771       /* These both read and modify the result.  We must handle them as writes
2772          to get proper dependencies for following instructions.  We must handle
2773          them as reads to get proper dependencies from this to previous
2774          instructions.  Thus we need to pass them to both sched_analyze_1
2775          and sched_analyze_2.  We must call sched_analyze_2 first in order
2776          to get the proper antecedent for the read.  */
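      /* As an illustration with hypothetical RTL: in
	   (set (reg:SI 1) (mem:SI (post_inc:SI (reg:SI 2))))
	 the POST_INC both reads and modifies (reg:SI 2), so its address is
	 analyzed here as a read and then, via sched_analyze_1, as a write.  */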
2777       sched_analyze_2 (deps, XEXP (x, 0), insn);
2778       sched_analyze_1 (deps, x, insn);
2779 
2780       if (cslr_p && sched_deps_info->finish_rhs)
2781 	sched_deps_info->finish_rhs ();
2782 
2783       return;
2784 
2785     case POST_MODIFY:
2786     case PRE_MODIFY:
2787       /* op0 = op0 + op1 */
2788       sched_analyze_2 (deps, XEXP (x, 0), insn);
2789       sched_analyze_2 (deps, XEXP (x, 1), insn);
2790       sched_analyze_1 (deps, x, insn);
2791 
2792       if (cslr_p && sched_deps_info->finish_rhs)
2793 	sched_deps_info->finish_rhs ();
2794 
2795       return;
2796 
2797     default:
2798       break;
2799     }
2800 
2801   /* Other cases: walk the insn.  */
2802   fmt = GET_RTX_FORMAT (code);
2803   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2804     {
2805       if (fmt[i] == 'e')
2806 	sched_analyze_2 (deps, XEXP (x, i), insn);
2807       else if (fmt[i] == 'E')
2808 	for (j = 0; j < XVECLEN (x, i); j++)
2809 	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2810     }
2811 
2812   if (cslr_p && sched_deps_info->finish_rhs)
2813     sched_deps_info->finish_rhs ();
2814 }
2815 
2816 /* Try to group two fusible insns together to prevent scheduler
2817    from scheduling them apart.  */
2818 
2819 static void
2820 sched_macro_fuse_insns (rtx_insn *insn)
2821 {
2822   rtx_insn *prev;
2823   /* No target hook would return true for a debug insn as either of the
2824      hook operands, and with very large sequences of only debug insns,
2825      calling sched_macro_fuse_insns on each of them would give quadratic
2826      compile time complexity.  */
2827   if (DEBUG_INSN_P (insn))
2828     return;
2829   prev = prev_nonnote_nondebug_insn (insn);
2830   if (!prev)
2831     return;
2832 
2833   if (any_condjump_p (insn))
2834     {
2835       unsigned int condreg1, condreg2;
2836       rtx cc_reg_1;
2837       if (targetm.fixed_condition_code_regs (&condreg1, &condreg2))
2838 	{
2839 	  cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2840 	  if (reg_referenced_p (cc_reg_1, PATTERN (insn))
2841 	      && modified_in_p (cc_reg_1, prev))
2842 	    {
2843 	      if (targetm.sched.macro_fusion_pair_p (prev, insn))
2844 		SCHED_GROUP_P (insn) = 1;
2845 	      return;
2846 	    }
2847 	}
2848     }
2849 
2850   if (single_set (insn) && single_set (prev))
2851     {
2852       if (targetm.sched.macro_fusion_pair_p (prev, insn))
2853 	SCHED_GROUP_P (insn) = 1;
2854     }
2855 }
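
/* As a hedged example (hypothetical RTL): on a target whose
   macro_fusion_pair_p hook accepts a compare followed by a conditional
   branch, a pair such as

     (set (reg:CC cc) (compare:CC (reg:SI 1) (const_int 0)))
     (jump_insn ... if_then_else using (reg:CC cc) ...)

   has SCHED_GROUP_P set on the jump, so the scheduler keeps the two insns
   adjacent.  */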
2856 
2857 /* Get the implicit reg pending clobbers for INSN and save them in TEMP.  */
2858 void
2859 get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2860 {
2861   extract_insn (insn);
2862   preprocess_constraints (insn);
2863   alternative_mask preferred = get_preferred_alternatives (insn);
2864   ira_implicitly_set_insn_hard_regs (temp, preferred);
2865   *temp &= ~ira_no_alloc_regs;
2866 }
2867 
2868 /* Analyze an INSN with pattern X to find all dependencies.  */
2869 static void
2870 sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
2871 {
2872   RTX_CODE code = GET_CODE (x);
2873   rtx link;
2874   unsigned i;
2875   reg_set_iterator rsi;
2876 
2877   if (! reload_completed)
2878     {
2879       HARD_REG_SET temp;
2880       get_implicit_reg_pending_clobbers (&temp, insn);
2881       implicit_reg_pending_clobbers |= temp;
2882     }
2883 
2884   can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2885 			 && code == SET);
2886 
2887   /* Group compare and branch insns for macro-fusion.  */
2888   if (!deps->readonly
2889       && targetm.sched.macro_fusion_p
2890       && targetm.sched.macro_fusion_p ())
2891     sched_macro_fuse_insns (insn);
2892 
2893   if (may_trap_p (x))
2894     /* Avoid moving trapping instructions across function calls that might
2895        not always return.  */
2896     add_dependence_list (insn, deps->last_function_call_may_noreturn,
2897 			 1, REG_DEP_ANTI, true);
2898 
2899   /* We must avoid creating a situation in which two successors of the
2900      current block have different unwind info after scheduling.  If at any
2901      point the two paths re-join this leads to incorrect unwind info.  */
2902   /* ??? There are certain situations involving a forced frame pointer in
2903      which, with extra effort, we could fix up the unwind info at a later
2904      CFG join.  However, it seems better to notice these cases earlier
2905      during prologue generation and avoid marking the frame pointer setup
2906      as frame-related at all.  */
2907   if (RTX_FRAME_RELATED_P (insn))
2908     {
2909       /* Make sure prologue insn is scheduled before next jump.  */
2910       deps->sched_before_next_jump
2911 	= alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2912 
2913       /* Make sure epilogue insn is scheduled after preceding jumps.  */
2914       add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2915 			   REG_DEP_ANTI, true);
2916       add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2917 			   true);
2918     }
2919 
2920   if (code == COND_EXEC)
2921     {
2922       sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2923 
2924       /* ??? Should be recording conditions so we reduce the number of
2925 	 false dependencies.  */
2926       x = COND_EXEC_CODE (x);
2927       code = GET_CODE (x);
2928     }
2929   if (code == SET || code == CLOBBER)
2930     {
2931       sched_analyze_1 (deps, x, insn);
2932 
2933       /* Bare clobber insns are used for letting life analysis, reg-stack
2934 	 and others know that a value is dead.  Depend on the last call
2935 	 instruction so that reg-stack won't get confused.  */
2936       if (code == CLOBBER)
2937 	add_dependence_list (insn, deps->last_function_call, 1,
2938 			     REG_DEP_OUTPUT, true);
2939     }
2940   else if (code == PARALLEL)
2941     {
2942       for (i = XVECLEN (x, 0); i--;)
2943 	{
2944 	  rtx sub = XVECEXP (x, 0, i);
2945 	  code = GET_CODE (sub);
2946 
2947 	  if (code == COND_EXEC)
2948 	    {
2949 	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2950 	      sub = COND_EXEC_CODE (sub);
2951 	      code = GET_CODE (sub);
2952 	    }
2953 	  else if (code == SET || code == CLOBBER)
2954 	    sched_analyze_1 (deps, sub, insn);
2955 	  else
2956 	    sched_analyze_2 (deps, sub, insn);
2957 	}
2958     }
2959   else
2960     sched_analyze_2 (deps, x, insn);
2961 
2962   /* Mark registers CLOBBERED or used by the called function.  */
2963   if (CALL_P (insn))
2964     {
2965       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2966 	{
2967 	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2968 	    sched_analyze_1 (deps, XEXP (link, 0), insn);
2969 	  else if (GET_CODE (XEXP (link, 0)) != SET)
2970 	    sched_analyze_2 (deps, XEXP (link, 0), insn);
2971 	}
2972       /* Don't schedule anything after a tail call; a tail call needs
2973 	 to use at least all call-saved registers.  */
2974       if (SIBLING_CALL_P (insn))
2975 	reg_pending_barrier = TRUE_BARRIER;
2976       else if (find_reg_note (insn, REG_SETJMP, NULL))
2977 	reg_pending_barrier = MOVE_BARRIER;
2978     }
2979 
2980   if (JUMP_P (insn))
2981     {
2982       rtx_insn *next = next_nonnote_nondebug_insn (insn);
2983       /* ??? For tablejumps, the barrier may appear not immediately after
2984          the jump, but after a label and a jump_table_data insn.  */
2985       if (next && LABEL_P (next) && NEXT_INSN (next)
2986 	  && JUMP_TABLE_DATA_P (NEXT_INSN (next)))
2987 	next = NEXT_INSN (NEXT_INSN (next));
2988       if (next && BARRIER_P (next))
2989 	reg_pending_barrier = MOVE_BARRIER;
2990       else
2991 	{
2992 	  rtx_insn_list *pending;
2993 	  rtx_expr_list *pending_mem;
2994 
2995           if (sched_deps_info->compute_jump_reg_dependencies)
2996             {
2997               (*sched_deps_info->compute_jump_reg_dependencies)
2998 		(insn, reg_pending_control_uses);
2999 
3000               /* Make latency of jump equal to 0 by using anti-dependence.  */
3001               EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3002                 {
3003                   struct deps_reg *reg_last = &deps->reg_last[i];
3004                   add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3005 				       false);
3006                   add_dependence_list (insn, reg_last->implicit_sets,
3007 				       0, REG_DEP_ANTI, false);
3008                   add_dependence_list (insn, reg_last->clobbers, 0,
3009 				       REG_DEP_ANTI, false);
3010                 }
3011             }
3012 
3013 	  /* All memory writes and volatile reads must happen before the
3014 	     jump.  Non-volatile reads must happen before the jump iff
3015 	     the result is needed according to the register use mask above.  */
3016 
3017 	  pending = deps->pending_write_insns;
3018 	  pending_mem = deps->pending_write_mems;
3019 	  while (pending)
3020 	    {
3021 	      if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3022 		add_dependence (insn, pending->insn (),
3023 				REG_DEP_OUTPUT);
3024 	      pending = pending->next ();
3025 	      pending_mem = pending_mem->next ();
3026 	    }
3027 
3028 	  pending = deps->pending_read_insns;
3029 	  pending_mem = deps->pending_read_mems;
3030 	  while (pending)
3031 	    {
3032 	      if (MEM_VOLATILE_P (pending_mem->element ())
3033 		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3034 		add_dependence (insn, pending->insn (),
3035 				REG_DEP_OUTPUT);
3036 	      pending = pending->next ();
3037 	      pending_mem = pending_mem->next ();
3038 	    }
3039 
3040 	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3041 			       REG_DEP_ANTI, true);
3042 	  add_dependence_list (insn, deps->pending_jump_insns, 1,
3043 			       REG_DEP_ANTI, true);
3044 	}
3045     }
3046 
3047   /* If this instruction can throw an exception, then moving it changes
3048      where block boundaries fall.  This is mighty confusing elsewhere.
3049      Therefore, prevent such an instruction from being moved.  Same for
3050      non-jump instructions that define block boundaries.
3051      ??? Unclear whether this is still necessary in EBB mode.  If not,
3052      add_branch_dependences should be adjusted for RGN mode instead.  */
3053   if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3054       || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3055     reg_pending_barrier = MOVE_BARRIER;
3056 
3057   if (sched_pressure != SCHED_PRESSURE_NONE)
3058     {
3059       setup_insn_reg_uses (deps, insn);
3060       init_insn_reg_pressure_info (insn);
3061     }
3062 
3063   /* Add register dependencies for insn.  */
3064   if (DEBUG_INSN_P (insn))
3065     {
3066       rtx_insn *prev = deps->last_debug_insn;
3067       rtx_insn_list *u;
3068 
3069       if (!deps->readonly)
3070 	deps->last_debug_insn = insn;
3071 
3072       if (prev)
3073 	add_dependence (insn, prev, REG_DEP_ANTI);
3074 
3075       add_dependence_list (insn, deps->last_function_call, 1,
3076 			   REG_DEP_ANTI, false);
3077 
3078       if (!sel_sched_p ())
3079 	for (u = deps->last_pending_memory_flush; u; u = u->next ())
3080 	  add_dependence (insn, u->insn (), REG_DEP_ANTI);
3081 
3082       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3083 	{
3084 	  struct deps_reg *reg_last = &deps->reg_last[i];
3085 	  add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3086 	  /* There's no point in making REG_DEP_CONTROL dependencies for
3087 	     debug insns.  */
3088 	  add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3089 			       false);
3090 
3091 	  if (!deps->readonly)
3092 	    reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3093 	}
3094       CLEAR_REG_SET (reg_pending_uses);
3095 
3096       /* Quite often, a debug insn will refer to stuff in the
3097 	 previous instruction, but the reason we want this
3098 	 dependency here is to make sure the scheduler doesn't
3099 	 gratuitously move a debug insn ahead.  This could dirty
3100 	 DF flags and cause additional analysis that wouldn't have
3101 	 occurred in compilation without debug insns, and such
3102 	 additional analysis can modify the generated code.  */
3103       prev = PREV_INSN (insn);
3104 
3105       if (prev && NONDEBUG_INSN_P (prev))
3106 	add_dependence (insn, prev, REG_DEP_ANTI);
3107     }
3108   else
3109     {
3110       regset_head set_or_clobbered;
3111 
3112       EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3113 	{
3114 	  struct deps_reg *reg_last = &deps->reg_last[i];
3115 	  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3116 	  add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3117 			       false);
3118 	  add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3119 			       false);
3120 
3121 	  if (!deps->readonly)
3122 	    {
3123 	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3124 	      reg_last->uses_length++;
3125 	    }
3126 	}
3127 
3128       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3129 	if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3130 	  {
3131 	    struct deps_reg *reg_last = &deps->reg_last[i];
3132 	    add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3133 	    add_dependence_list (insn, reg_last->implicit_sets, 0,
3134 				 REG_DEP_ANTI, false);
3135 	    add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3136 				 false);
3137 
3138 	    if (!deps->readonly)
3139 	      {
3140 		reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3141 		reg_last->uses_length++;
3142 	      }
3143 	  }
3144 
3145       if (targetm.sched.exposed_pipeline)
3146 	{
3147 	  INIT_REG_SET (&set_or_clobbered);
3148 	  bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3149 		      reg_pending_sets);
3150 	  EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3151 	    {
3152 	      struct deps_reg *reg_last = &deps->reg_last[i];
3153 	      rtx list;
3154 	      for (list = reg_last->uses; list; list = XEXP (list, 1))
3155 		{
3156 		  rtx other = XEXP (list, 0);
3157 		  if (INSN_CACHED_COND (other) != const_true_rtx
3158 		      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3159 		    INSN_CACHED_COND (other) = const_true_rtx;
3160 		}
3161 	    }
3162 	}
3163 
3164       /* If the current insn is conditional, we can't free any
3165 	 of the lists.  */
3166       if (sched_has_condition_p (insn))
3167 	{
3168 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3169 	    {
3170 	      struct deps_reg *reg_last = &deps->reg_last[i];
3171 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3172 				   false);
3173 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3174 				   REG_DEP_ANTI, false);
3175 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3176 				   false);
3177 	      add_dependence_list (insn, reg_last->control_uses, 0,
3178 				   REG_DEP_CONTROL, false);
3179 
3180 	      if (!deps->readonly)
3181 		{
3182 		  reg_last->clobbers
3183 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3184 		  reg_last->clobbers_length++;
3185 		}
3186 	    }
3187 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3188 	    {
3189 	      struct deps_reg *reg_last = &deps->reg_last[i];
3190 	      add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3191 				   false);
3192 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3193 				   REG_DEP_ANTI, false);
3194 	      add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3195 				   false);
3196 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3197 				   false);
3198 	      add_dependence_list (insn, reg_last->control_uses, 0,
3199 				   REG_DEP_CONTROL, false);
3200 
3201 	      if (!deps->readonly)
3202 		reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3203 	    }
3204 	}
3205       else
3206 	{
3207 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3208 	    {
3209 	      struct deps_reg *reg_last = &deps->reg_last[i];
3210 	      if (reg_last->uses_length >= param_max_pending_list_length
3211 		  || reg_last->clobbers_length >= param_max_pending_list_length)
3212 		{
3213 		  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3214 						REG_DEP_OUTPUT, false);
3215 		  add_dependence_list_and_free (deps, insn,
3216 						&reg_last->implicit_sets, 0,
3217 						REG_DEP_ANTI, false);
3218 		  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3219 						REG_DEP_ANTI, false);
3220 		  add_dependence_list_and_free (deps, insn,
3221 						&reg_last->control_uses, 0,
3222 						REG_DEP_ANTI, false);
3223 		  add_dependence_list_and_free (deps, insn,
3224 						&reg_last->clobbers, 0,
3225 						REG_DEP_OUTPUT, false);
3226 
3227 		  if (!deps->readonly)
3228 		    {
3229 		      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3230 		      reg_last->clobbers_length = 0;
3231 		      reg_last->uses_length = 0;
3232 		    }
3233 		}
3234 	      else
3235 		{
3236 		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3237 				       false);
3238 		  add_dependence_list (insn, reg_last->implicit_sets, 0,
3239 				       REG_DEP_ANTI, false);
3240 		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3241 				       false);
3242 		  add_dependence_list (insn, reg_last->control_uses, 0,
3243 				       REG_DEP_CONTROL, false);
3244 		}
3245 
3246 	      if (!deps->readonly)
3247 		{
3248 		  reg_last->clobbers_length++;
3249 		  reg_last->clobbers
3250 		    = alloc_INSN_LIST (insn, reg_last->clobbers);
3251 		}
3252 	    }
3253 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3254 	    {
3255 	      struct deps_reg *reg_last = &deps->reg_last[i];
3256 
3257 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3258 					    REG_DEP_OUTPUT, false);
3259 	      add_dependence_list_and_free (deps, insn,
3260 					    &reg_last->implicit_sets,
3261 					    0, REG_DEP_ANTI, false);
3262 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3263 					    REG_DEP_OUTPUT, false);
3264 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3265 					    REG_DEP_ANTI, false);
3266 	      add_dependence_list (insn, reg_last->control_uses, 0,
3267 				   REG_DEP_CONTROL, false);
3268 
3269 	      if (!deps->readonly)
3270 		{
3271 		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3272 		  reg_last->uses_length = 0;
3273 		  reg_last->clobbers_length = 0;
3274 		}
3275 	    }
3276 	}
3277       if (!deps->readonly)
3278 	{
3279 	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3280 	    {
3281 	      struct deps_reg *reg_last = &deps->reg_last[i];
3282 	      reg_last->control_uses
3283 		= alloc_INSN_LIST (insn, reg_last->control_uses);
3284 	    }
3285 	}
3286     }
3287 
3288   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3289     if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3290       {
3291 	struct deps_reg *reg_last = &deps->reg_last[i];
3292 	add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3293 	add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3294 	add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3295 	add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3296 			     false);
3297 
3298 	if (!deps->readonly)
3299 	  reg_last->implicit_sets
3300 	    = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3301       }
3302 
3303   if (!deps->readonly)
3304     {
3305       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3306       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3307       IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3308       IOR_REG_SET_HRS (&deps->reg_last_in_use,
3309 		       implicit_reg_pending_uses
3310 		       | implicit_reg_pending_clobbers);
3311 
3312       /* Set up the pending barrier found.  */
3313       deps->last_reg_pending_barrier = reg_pending_barrier;
3314     }
3315 
3316   CLEAR_REG_SET (reg_pending_uses);
3317   CLEAR_REG_SET (reg_pending_clobbers);
3318   CLEAR_REG_SET (reg_pending_sets);
3319   CLEAR_REG_SET (reg_pending_control_uses);
3320   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3321   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3322 
3323   /* Add dependencies if a scheduling barrier was found.  */
3324   if (reg_pending_barrier)
3325     {
3326       /* In the case of a barrier, most of the added dependencies are
3327          not real, so we use anti-dependence here.  */
3328       if (sched_has_condition_p (insn))
3329 	{
3330 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3331 	    {
3332 	      struct deps_reg *reg_last = &deps->reg_last[i];
3333 	      add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3334 				   true);
3335 	      add_dependence_list (insn, reg_last->sets, 0,
3336 				   reg_pending_barrier == TRUE_BARRIER
3337 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3338 	      add_dependence_list (insn, reg_last->implicit_sets, 0,
3339 				   REG_DEP_ANTI, true);
3340 	      add_dependence_list (insn, reg_last->clobbers, 0,
3341 				   reg_pending_barrier == TRUE_BARRIER
3342 				   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3343 	    }
3344 	}
3345       else
3346 	{
3347 	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3348 	    {
3349 	      struct deps_reg *reg_last = &deps->reg_last[i];
3350 	      add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3351 					    REG_DEP_ANTI, true);
3352 	      add_dependence_list_and_free (deps, insn,
3353 					    &reg_last->control_uses, 0,
3354 					    REG_DEP_CONTROL, true);
3355 	      add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3356 					    reg_pending_barrier == TRUE_BARRIER
3357 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3358 					    true);
3359 	      add_dependence_list_and_free (deps, insn,
3360 					    &reg_last->implicit_sets, 0,
3361 					    REG_DEP_ANTI, true);
3362 	      add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3363 					    reg_pending_barrier == TRUE_BARRIER
3364 					    ? REG_DEP_TRUE : REG_DEP_ANTI,
3365 					    true);
3366 
3367               if (!deps->readonly)
3368                 {
3369                   reg_last->uses_length = 0;
3370                   reg_last->clobbers_length = 0;
3371                 }
3372 	    }
3373 	}
3374 
3375       if (!deps->readonly)
3376         for (i = 0; i < (unsigned)deps->max_reg; i++)
3377           {
3378             struct deps_reg *reg_last = &deps->reg_last[i];
3379             reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3380             SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3381           }
3382 
3383       /* Don't flush pending lists on speculative checks for
3384 	 selective scheduling.  */
3385       if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3386 	flush_pending_lists (deps, insn, true, true);
3387 
3388       reg_pending_barrier = NOT_A_BARRIER;
3389     }
3390 
3391   /* If a post-call group is still open, see if it should remain so.
3392      This insn must be a simple move of a hard reg to a pseudo or
3393      vice-versa.
3394 
3395      We must avoid moving these insns for correctness on targets
3396      with small register classes, and for special registers like
3397      PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
3398      hard regs for all targets.  */
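  /* For example (illustrative RTL only, register numbers arbitrary): a copy
     of a call's hard return-value register into a pseudo, roughly
       (set (reg:SI 123) (reg:SI 0)),
     keeps the group open, as does a stack-pointer adjustment such as
       (set (reg sp) (plus (reg sp) (const_int 16)));
     any other pattern below ends the group.  */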
3399 
3400   if (deps->in_post_call_group_p)
3401     {
3402       rtx tmp, set = single_set (insn);
3403       int src_regno, dest_regno;
3404 
3405       if (set == NULL)
3406 	{
3407 	  if (DEBUG_INSN_P (insn))
3408 	    /* We don't want to mark debug insns as part of the same
3409 	       sched group.  We know they really aren't, but if we used
3410 	       debug insns to tell that a call group is over, code
3411 	       generation would differ depending on whether debug insns
3412 	       are present, whenever the instructions that follow look
3413 	       like they should be part of the call group.
3414 
3415 	       Also, if we did, chain_to_prev_insn would move the
3416 	       deps of the debug insn to the call insn, modifying
3417 	       non-debug post-dependency counts of the debug insn
3418 	       dependencies and otherwise messing with the scheduling
3419 	       order.
3420 
3421 	       Instead, let such debug insns be scheduled freely, but
3422 	       keep the call group open in case there are insns that
3423 	       should be part of it afterwards.  Since we grant debug
3424 	       insns higher priority than even sched group insns, it
3425 	       will all turn out all right.  */
3426 	    goto debug_dont_end_call_group;
3427 	  else
3428 	    goto end_call_group;
3429 	}
3430 
3431       tmp = SET_DEST (set);
3432       if (GET_CODE (tmp) == SUBREG)
3433 	tmp = SUBREG_REG (tmp);
3434       if (REG_P (tmp))
3435 	dest_regno = REGNO (tmp);
3436       else
3437 	goto end_call_group;
3438 
3439       tmp = SET_SRC (set);
3440       if (GET_CODE (tmp) == SUBREG)
3441 	tmp = SUBREG_REG (tmp);
3442       if ((GET_CODE (tmp) == PLUS
3443 	   || GET_CODE (tmp) == MINUS)
3444 	  && REG_P (XEXP (tmp, 0))
3445 	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3446 	  && dest_regno == STACK_POINTER_REGNUM)
3447 	src_regno = STACK_POINTER_REGNUM;
3448       else if (REG_P (tmp))
3449 	src_regno = REGNO (tmp);
3450       else
3451 	goto end_call_group;
3452 
3453       if (src_regno < FIRST_PSEUDO_REGISTER
3454 	  || dest_regno < FIRST_PSEUDO_REGISTER)
3455 	{
3456 	  if (!deps->readonly
3457               && deps->in_post_call_group_p == post_call_initial)
3458 	    deps->in_post_call_group_p = post_call;
3459 
3460           if (!sel_sched_p () || sched_emulate_haifa_p)
3461             {
3462               SCHED_GROUP_P (insn) = 1;
3463               CANT_MOVE (insn) = 1;
3464             }
3465 	}
3466       else
3467 	{
3468 	end_call_group:
3469           if (!deps->readonly)
3470             deps->in_post_call_group_p = not_post_call;
3471 	}
3472     }
3473 
3474  debug_dont_end_call_group:
3475   if ((current_sched_info->flags & DO_SPECULATION)
3476       && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3477     /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3478        be speculated.  */
3479     {
3480       if (sel_sched_p ())
3481         sel_mark_hard_insn (insn);
3482       else
3483         {
3484           sd_iterator_def sd_it;
3485           dep_t dep;
3486 
3487           for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3488                sd_iterator_cond (&sd_it, &dep);)
3489             change_spec_dep_to_hard (sd_it);
3490         }
3491     }
3492 
3493   /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3494      honor their original ordering.  */
3495   if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3496     {
3497       if (deps->last_args_size)
3498 	add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3499       if (!deps->readonly)
3500 	deps->last_args_size = insn;
3501     }
3502 
3503   /* We must not mix prologue and epilogue insns.  See PR78029.  */
3504   if (prologue_contains (insn))
3505     {
3506       add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
3507       if (!deps->readonly)
3508 	{
3509 	  if (deps->last_logue_was_epilogue)
3510 	    free_INSN_LIST_list (&deps->last_prologue);
3511 	  deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
3512 	  deps->last_logue_was_epilogue = false;
3513 	}
3514     }
3515 
3516   if (epilogue_contains (insn))
3517     {
3518       add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
3519       if (!deps->readonly)
3520 	{
3521 	  if (!deps->last_logue_was_epilogue)
3522 	    free_INSN_LIST_list (&deps->last_epilogue);
3523 	  deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
3524 	  deps->last_logue_was_epilogue = true;
3525 	}
3526     }
3527 }
3528 
3529 /* Return TRUE if INSN might not always return normally (e.g. call exit,
3530    longjmp, loop forever, ...).  */
3531 /* FIXME: Why can't this function just use flags_from_decl_or_type and
3532    test for ECF_NORETURN?  */
3533 static bool
3534 call_may_noreturn_p (rtx_insn *insn)
3535 {
3536   rtx call;
3537 
3538   /* const or pure calls that aren't looping will always return.  */
3539   if (RTL_CONST_OR_PURE_CALL_P (insn)
3540       && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3541     return false;
3542 
3543   call = get_call_rtx_from (insn);
3544   if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3545     {
3546       rtx symbol = XEXP (XEXP (call, 0), 0);
3547       if (SYMBOL_REF_DECL (symbol)
3548 	  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3549 	{
3550 	  if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3551 	      == BUILT_IN_NORMAL)
3552 	    switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3553 	      {
3554 	      case BUILT_IN_BCMP:
3555 	      case BUILT_IN_BCOPY:
3556 	      case BUILT_IN_BZERO:
3557 	      case BUILT_IN_INDEX:
3558 	      case BUILT_IN_MEMCHR:
3559 	      case BUILT_IN_MEMCMP:
3560 	      case BUILT_IN_MEMCPY:
3561 	      case BUILT_IN_MEMMOVE:
3562 	      case BUILT_IN_MEMPCPY:
3563 	      case BUILT_IN_MEMSET:
3564 	      case BUILT_IN_RINDEX:
3565 	      case BUILT_IN_STPCPY:
3566 	      case BUILT_IN_STPNCPY:
3567 	      case BUILT_IN_STRCAT:
3568 	      case BUILT_IN_STRCHR:
3569 	      case BUILT_IN_STRCMP:
3570 	      case BUILT_IN_STRCPY:
3571 	      case BUILT_IN_STRCSPN:
3572 	      case BUILT_IN_STRLEN:
3573 	      case BUILT_IN_STRNCAT:
3574 	      case BUILT_IN_STRNCMP:
3575 	      case BUILT_IN_STRNCPY:
3576 	      case BUILT_IN_STRPBRK:
3577 	      case BUILT_IN_STRRCHR:
3578 	      case BUILT_IN_STRSPN:
3579 	      case BUILT_IN_STRSTR:
3580 		/* Assume certain string/memory builtins always return.  */
3581 		return false;
3582 	      default:
3583 		break;
3584 	      }
3585 	}
3586     }
3587 
3588   /* For all other calls assume that they might not always return.  */
3589   return true;
3590 }
3591 
3592 /* Return true if INSN should be made dependent on the previous instruction
3593    group, and if all INSN's dependencies should be moved to the first
3594    instruction of that group.  */
3595 
3596 static bool
3597 chain_to_prev_insn_p (rtx_insn *insn)
3598 {
3599   /* INSN forms a group with the previous instruction.  */
3600   if (SCHED_GROUP_P (insn))
3601     return true;
3602 
3603   /* If the previous instruction clobbers a register R and this one sets
3604      part of R, the clobber was added specifically to help us track the
3605      liveness of R.  There's no point scheduling the clobber and leaving
3606      INSN behind, especially if we move the clobber to another block.  */
3607   rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
3608   if (prev
3609       && INSN_P (prev)
3610       && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3611       && GET_CODE (PATTERN (prev)) == CLOBBER)
3612     {
3613       rtx x = XEXP (PATTERN (prev), 0);
3614       if (set_of (x, insn))
3615 	return true;
3616     }
3617 
3618   return false;
3619 }
3620 
3621 /* Analyze INSN with DEPS as a context.  */
3622 void
3623 deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
3624 {
3625   if (sched_deps_info->start_insn)
3626     sched_deps_info->start_insn (insn);
3627 
3628   /* Record the condition for this insn.  */
3629   if (NONDEBUG_INSN_P (insn))
3630     {
3631       rtx t;
3632       sched_get_condition_with_rev (insn, NULL);
3633       t = INSN_CACHED_COND (insn);
3634       INSN_COND_DEPS (insn) = NULL;
3635       if (reload_completed
3636 	  && (current_sched_info->flags & DO_PREDICATION)
3637 	  && COMPARISON_P (t)
3638 	  && REG_P (XEXP (t, 0))
3639 	  && CONSTANT_P (XEXP (t, 1)))
3640 	{
3641 	  unsigned int regno;
3642 	  int nregs;
3643 	  rtx_insn_list *cond_deps = NULL;
3644 	  t = XEXP (t, 0);
3645 	  regno = REGNO (t);
3646 	  nregs = REG_NREGS (t);
3647 	  while (nregs-- > 0)
3648 	    {
3649 	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
3650 	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
3651 	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
3652 	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
3653 	    }
3654 	  INSN_COND_DEPS (insn) = cond_deps;
3655 	}
3656     }
3657 
3658   if (JUMP_P (insn))
3659     {
3660       /* Make each JUMP_INSN (but not a speculative check)
3661          a scheduling barrier for memory references.  */
3662       if (!deps->readonly
3663           && !(sel_sched_p ()
3664                && sel_insn_is_speculation_check (insn)))
3665         {
3666           /* Keep the list a reasonable size.  */
3667 	  if (deps->pending_flush_length++ >= param_max_pending_list_length)
3668 	    flush_pending_lists (deps, insn, true, true);
3669           else
3670 	    deps->pending_jump_insns
3671               = alloc_INSN_LIST (insn, deps->pending_jump_insns);
3672         }
3673 
3674       /* For each insn which shouldn't cross a jump, add a dependence.  */
3675       add_dependence_list_and_free (deps, insn,
3676 				    &deps->sched_before_next_jump, 1,
3677 				    REG_DEP_ANTI, true);
3678 
3679       sched_analyze_insn (deps, PATTERN (insn), insn);
3680     }
3681   else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
3682     {
3683       sched_analyze_insn (deps, PATTERN (insn), insn);
3684     }
3685   else if (CALL_P (insn))
3686     {
3687       int i;
3688 
3689       CANT_MOVE (insn) = 1;
3690 
3691       if (find_reg_note (insn, REG_SETJMP, NULL))
3692         {
3693           /* This is setjmp.  Assume that all registers, not just
3694              hard registers, may be clobbered by this call.  */
3695           reg_pending_barrier = MOVE_BARRIER;
3696         }
3697       else
3698         {
3699 	  function_abi callee_abi = insn_callee_abi (insn);
3700           for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3701             /* A call may read and modify global register variables.  */
3702             if (global_regs[i])
3703               {
3704                 SET_REGNO_REG_SET (reg_pending_sets, i);
3705                 SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3706               }
3707           /* Other call-clobbered hard regs may be clobbered.
3708              Since we only have a choice between 'might be clobbered'
3709              and 'definitely not clobbered', we must include all
3710              partly call-clobbered registers here.  */
3711 	    else if (callee_abi.clobbers_at_least_part_of_reg_p (i))
3712               SET_REGNO_REG_SET (reg_pending_clobbers, i);
3713           /* We don't know what set of fixed registers might be used
3714              by the function, but it is certain that the stack pointer
3715              is among them, so be conservative.  */
3716             else if (fixed_regs[i])
3717 	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3718           /* The frame pointer is normally not used by the function
3719              itself, but by the debugger.  */
3720           /* ??? MIPS o32 is an exception.  It uses the frame pointer
3721              in the macro expansion of jal but does not represent this
3722              fact in the call_insn rtl.  */
3723             else if (i == FRAME_POINTER_REGNUM
3724                      || (i == HARD_FRAME_POINTER_REGNUM
3725                          && (! reload_completed || frame_pointer_needed)))
3726 	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3727         }
3728 
3729       /* For each insn which shouldn't cross a call, add a dependence
3730          between that insn and this call insn.  */
3731       add_dependence_list_and_free (deps, insn,
3732                                     &deps->sched_before_next_call, 1,
3733                                     REG_DEP_ANTI, true);
3734 
3735       sched_analyze_insn (deps, PATTERN (insn), insn);
3736 
3737       /* If CALL would be in a sched group, then this will violate
3738 	 the convention that sched group insns have dependencies only on the
3739 	 previous instruction.
3740 
3741 	 Of course one can say: "Hey!  What about the head of the sched group?"
3742 	 And I will answer: "Basic principles (one dep per insn) are always
3743 	 the same."  */
3744       gcc_assert (!SCHED_GROUP_P (insn));
3745 
3746       /* In the absence of interprocedural alias analysis, we must flush
3747          all pending reads and writes, and start new dependencies starting
3748          from here.  But only flush writes for constant calls (which may
3749          be passed a pointer to something we haven't written yet).  */
3750       flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3751 
3752       if (!deps->readonly)
3753         {
3754           /* Remember the last function call for limiting lifetimes.  */
3755           free_INSN_LIST_list (&deps->last_function_call);
3756           deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3757 
3758 	  if (call_may_noreturn_p (insn))
3759 	    {
3760 	      /* Remember the last function call that might not always return
3761 		 normally for limiting moves of trapping insns.  */
3762 	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3763 	      deps->last_function_call_may_noreturn
3764 		= alloc_INSN_LIST (insn, NULL_RTX);
3765 	    }
3766 
3767           /* Before reload, begin a post-call group, so as to keep the
3768              lifetimes of hard registers correct.  */
3769           if (! reload_completed)
3770             deps->in_post_call_group_p = post_call;
3771         }
3772     }
3773 
3774   if (sched_deps_info->use_cselib)
3775     cselib_process_insn (insn);
3776 
3777   if (sched_deps_info->finish_insn)
3778     sched_deps_info->finish_insn ();
3779 
3780   /* Fixup the dependencies in the sched group.  */
3781   if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3782       && chain_to_prev_insn_p (insn)
3783       && !sel_sched_p ())
3784     chain_to_prev_insn (insn);
3785 }
3786 
3787 /* Initialize DEPS for the new block beginning with HEAD.  */
3788 void
3789 deps_start_bb (class deps_desc *deps, rtx_insn *head)
3790 {
3791   gcc_assert (!deps->readonly);
3792 
3793   /* Before reload, if the previous block ended in a call, show that
3794      we are inside a post-call group, so as to keep the lifetimes of
3795      hard registers correct.  */
3796   if (! reload_completed && !LABEL_P (head))
3797     {
3798       rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3799 
3800       if (insn && CALL_P (insn))
3801 	deps->in_post_call_group_p = post_call_initial;
3802     }
3803 }
3804 
3805 /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3806    dependencies for each insn.  */
3807 void
3808 sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3809 {
3810   rtx_insn *insn;
3811 
3812   if (sched_deps_info->use_cselib)
3813     cselib_init (CSELIB_RECORD_MEMORY);
3814 
3815   deps_start_bb (deps, head);
3816 
3817   for (insn = head;; insn = NEXT_INSN (insn))
3818     {
3819       if (INSN_P (insn))
3820 	{
3821 	  /* And initialize deps_lists.  */
3822 	  sd_init_insn (insn);
3823 	  /* Clean up SCHED_GROUP_P which may be set by last
3824 	     scheduler pass.  */
3825 	  if (SCHED_GROUP_P (insn))
3826 	    SCHED_GROUP_P (insn) = 0;
3827 	}
3828 
3829       deps_analyze_insn (deps, insn);
3830 
3831       if (insn == tail)
3832 	{
3833 	  if (sched_deps_info->use_cselib)
3834 	    cselib_finish ();
3835 	  return;
3836 	}
3837     }
3838 }
3839 
3840 /* Helper for sched_free_deps ().
3841    Delete INSN's (RESOLVED_P) backward dependencies.  */
3842 static void
3843 delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
3844 {
3845   sd_iterator_def sd_it;
3846   dep_t dep;
3847   sd_list_types_def types;
3848 
3849   if (resolved_p)
3850     types = SD_LIST_RES_BACK;
3851   else
3852     types = SD_LIST_BACK;
3853 
3854   for (sd_it = sd_iterator_start (insn, types);
3855        sd_iterator_cond (&sd_it, &dep);)
3856     {
3857       dep_link_t link = *sd_it.linkp;
3858       dep_node_t node = DEP_LINK_NODE (link);
3859       deps_list_t back_list;
3860       deps_list_t forw_list;
3861 
3862       get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3863       remove_from_deps_list (link, back_list);
3864       delete_dep_node (node);
3865     }
3866 }
3867 
3868 /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3869    deps_lists.  */
3870 void
3871 sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3872 {
3873   rtx_insn *insn;
3874   rtx_insn *next_tail = NEXT_INSN (tail);
3875 
3876   /* We make two passes since some insns may be scheduled before their
3877      dependencies are resolved.  */
3878   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3879     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3880       {
3881 	/* Clear forward deps and leave the dep_nodes to the
3882 	   corresponding back_deps list.  */
3883 	if (resolved_p)
3884 	  clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3885 	else
3886 	  clear_deps_list (INSN_FORW_DEPS (insn));
3887       }
3888   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3889     if (INSN_P (insn) && INSN_LUID (insn) > 0)
3890       {
3891 	/* Clear resolved back deps together with their dep_nodes.  */
3892 	delete_dep_nodes_in_back_deps (insn, resolved_p);
3893 
3894 	sd_finish_insn (insn);
3895       }
3896 }
3897 
3898 /* Initialize variables for region data dependence analysis.
3899    When LAZY_REG_LAST is true, do not allocate reg_last array
3900    of class deps_desc immediately.  */
3901 
3902 void
3903 init_deps (class deps_desc *deps, bool lazy_reg_last)
3904 {
3905   int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3906 
3907   deps->max_reg = max_reg;
3908   if (lazy_reg_last)
3909     deps->reg_last = NULL;
3910   else
3911     deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3912   INIT_REG_SET (&deps->reg_last_in_use);
3913 
3914   deps->pending_read_insns = 0;
3915   deps->pending_read_mems = 0;
3916   deps->pending_write_insns = 0;
3917   deps->pending_write_mems = 0;
3918   deps->pending_jump_insns = 0;
3919   deps->pending_read_list_length = 0;
3920   deps->pending_write_list_length = 0;
3921   deps->pending_flush_length = 0;
3922   deps->last_pending_memory_flush = 0;
3923   deps->last_function_call = 0;
3924   deps->last_function_call_may_noreturn = 0;
3925   deps->sched_before_next_call = 0;
3926   deps->sched_before_next_jump = 0;
3927   deps->in_post_call_group_p = not_post_call;
3928   deps->last_debug_insn = 0;
3929   deps->last_args_size = 0;
3930   deps->last_prologue = 0;
3931   deps->last_epilogue = 0;
3932   deps->last_logue_was_epilogue = false;
3933   deps->last_reg_pending_barrier = NOT_A_BARRIER;
3934   deps->readonly = 0;
3935 }
3936 
3937 /* Init only the reg_last field of DEPS, which was not allocated before
3938    because we initialized DEPS lazily.  */
3939 void
3940 init_deps_reg_last (class deps_desc *deps)
3941 {
3942   gcc_assert (deps && deps->max_reg > 0);
3943   gcc_assert (deps->reg_last == NULL);
3944 
3945   deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3946 }
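
/* A rough usage sketch of the lazy variant (hypothetical caller, not code
   from this file):

     class deps_desc deps;
     init_deps (&deps, true);      // reg_last stays NULL for now
     ...
     init_deps_reg_last (&deps);   // allocate before first reg_last access

   Callers that always need reg_last pass false to init_deps and skip the
   second call.  */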
3947 
3948 
3949 /* Free insn lists found in DEPS.  */
3950 
3951 void
3952 free_deps (class deps_desc *deps)
3953 {
3954   unsigned i;
3955   reg_set_iterator rsi;
3956 
3957   /* We set max_reg to 0 when this context was already freed.  */
3958   if (deps->max_reg == 0)
3959     {
3960       gcc_assert (deps->reg_last == NULL);
3961       return;
3962     }
3963   deps->max_reg = 0;
3964 
3965   free_INSN_LIST_list (&deps->pending_read_insns);
3966   free_EXPR_LIST_list (&deps->pending_read_mems);
3967   free_INSN_LIST_list (&deps->pending_write_insns);
3968   free_EXPR_LIST_list (&deps->pending_write_mems);
3969   free_INSN_LIST_list (&deps->last_pending_memory_flush);
3970 
3971   /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3972      times.  For a testcase with 42000 regs and 8000 small basic blocks,
3973      this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
3974   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3975     {
3976       struct deps_reg *reg_last = &deps->reg_last[i];
3977       if (reg_last->uses)
3978 	free_INSN_LIST_list (&reg_last->uses);
3979       if (reg_last->sets)
3980 	free_INSN_LIST_list (&reg_last->sets);
3981       if (reg_last->implicit_sets)
3982 	free_INSN_LIST_list (&reg_last->implicit_sets);
3983       if (reg_last->control_uses)
3984 	free_INSN_LIST_list (&reg_last->control_uses);
3985       if (reg_last->clobbers)
3986 	free_INSN_LIST_list (&reg_last->clobbers);
3987     }
3988   CLEAR_REG_SET (&deps->reg_last_in_use);
3989 
3990   /* As we initialize reg_last lazily, it is possible that we didn't allocate
3991      it at all.  */
3992   free (deps->reg_last);
3993   deps->reg_last = NULL;
3994 
3995   deps = NULL;
3996 }
3997 
3998 /* Remove INSN from dependence contexts DEPS.  */
3999 void
4000 remove_from_deps (class deps_desc *deps, rtx_insn *insn)
4001 {
4002   int removed;
4003   unsigned i;
4004   reg_set_iterator rsi;
4005 
4006   removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
4007                                                &deps->pending_read_mems);
4008   if (!DEBUG_INSN_P (insn))
4009     deps->pending_read_list_length -= removed;
4010   removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
4011                                                &deps->pending_write_mems);
4012   deps->pending_write_list_length -= removed;
4013 
4014   removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
4015   deps->pending_flush_length -= removed;
4016   removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
4017   deps->pending_flush_length -= removed;
4018 
4019   unsigned to_clear = -1U;
4020   EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
4021     {
4022       if (to_clear != -1U)
4023 	{
4024 	  CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
4025 	  to_clear = -1U;
4026 	}
4027       struct deps_reg *reg_last = &deps->reg_last[i];
4028       if (reg_last->uses)
4029 	remove_from_dependence_list (insn, &reg_last->uses);
4030       if (reg_last->sets)
4031 	remove_from_dependence_list (insn, &reg_last->sets);
4032       if (reg_last->implicit_sets)
4033 	remove_from_dependence_list (insn, &reg_last->implicit_sets);
4034       if (reg_last->clobbers)
4035 	remove_from_dependence_list (insn, &reg_last->clobbers);
4036       if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
4037 	  && !reg_last->clobbers)
4038 	to_clear = i;
4039     }
4040   if (to_clear != -1U)
4041     CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
4042 
4043   if (CALL_P (insn))
4044     {
4045       remove_from_dependence_list (insn, &deps->last_function_call);
4046       remove_from_dependence_list (insn,
4047 				   &deps->last_function_call_may_noreturn);
4048     }
4049   remove_from_dependence_list (insn, &deps->sched_before_next_call);
4050 }
4051 
4052 /* Init deps data vector.  */
4053 static void
4054 init_deps_data_vector (void)
4055 {
4056   int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4057   if (reserve > 0 && ! h_d_i_d.space (reserve))
4058     h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2, true);
4059 }
4060 
4061 /* If it is profitable to use them, initialize or extend (depending on
4062    GLOBAL_P) dependency data.  */
4063 void
4064 sched_deps_init (bool global_p)
4065 {
4066   /* Average number of insns in the basic block.
4067      '+ 1' is used to make it nonzero.  */
4068   int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4069 
4070   init_deps_data_vector ();
4071 
4072   /* We use another caching mechanism for selective scheduling, so
4073      we don't use this one.  */
4074   if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4075     {
4076       /* ?!? We could save some memory by computing a per-region luid mapping
4077          which could reduce both the number of vectors in the cache and the
4078          size of each vector.  Instead we just avoid the cache entirely unless
4079          the average number of instructions in a basic block is very high.  See
4080          the comment before the declaration of true_dependency_cache for
4081          what we consider "very high".  */
4082       cache_size = 0;
4083       extend_dependency_caches (sched_max_luid, true);
4084     }
4085 
4086   if (global_p)
4087     {
4088       dl_pool = new object_allocator<_deps_list> ("deps_list");
4089 				/* Allocate lists for one block at a time.  */
4090       dn_pool = new object_allocator<_dep_node> ("dep_node");
4091 				/* Allocate nodes for one block at a time.  */
4092     }
4093 }
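
/* For a rough sense of the caching heuristic above: with, say, sched_max_luid
   of 12000 and 20 basic blocks, insns_in_block is 12000 / 20 + 1 == 601 > 500,
   so the dependency caches are created; with 200 blocks the average drops to
   61 and the caches are skipped.  The numbers are illustrative only.  */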
4094 
4095 
4096 /* Create or extend (depending on CREATE_P) dependency caches to
4097    size N.  */
4098 void
4099 extend_dependency_caches (int n, bool create_p)
4100 {
4101   if (create_p || true_dependency_cache)
4102     {
4103       int i, luid = cache_size + n;
4104 
4105       true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
4106 					  luid);
4107       output_dependency_cache = XRESIZEVEC (bitmap_head,
4108 					    output_dependency_cache, luid);
4109       anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
4110 					  luid);
4111       control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
4112 					  luid);
4113 
4114       if (current_sched_info->flags & DO_SPECULATION)
4115         spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
4116 					    luid);
4117 
4118       for (i = cache_size; i < luid; i++)
4119 	{
4120 	  bitmap_initialize (&true_dependency_cache[i], 0);
4121 	  bitmap_initialize (&output_dependency_cache[i], 0);
4122 	  bitmap_initialize (&anti_dependency_cache[i], 0);
4123 	  bitmap_initialize (&control_dependency_cache[i], 0);
4124 
4125           if (current_sched_info->flags & DO_SPECULATION)
4126             bitmap_initialize (&spec_dependency_cache[i], 0);
4127 	}
4128       cache_size = luid;
4129     }
4130 }
4131 
4132 /* Finalize dependency information for the whole function.  */
4133 void
4134 sched_deps_finish (void)
4135 {
4136   gcc_assert (deps_pools_are_empty_p ());
4137   delete dn_pool;
4138   delete dl_pool;
4139   dn_pool = NULL;
4140   dl_pool = NULL;
4141 
4142   h_d_i_d.release ();
4143   cache_size = 0;
4144 
4145   if (true_dependency_cache)
4146     {
4147       int i;
4148 
4149       for (i = 0; i < cache_size; i++)
4150 	{
4151 	  bitmap_clear (&true_dependency_cache[i]);
4152 	  bitmap_clear (&output_dependency_cache[i]);
4153 	  bitmap_clear (&anti_dependency_cache[i]);
4154 	  bitmap_clear (&control_dependency_cache[i]);
4155 
4156           if (sched_deps_info->generate_spec_deps)
4157             bitmap_clear (&spec_dependency_cache[i]);
4158 	}
4159       free (true_dependency_cache);
4160       true_dependency_cache = NULL;
4161       free (output_dependency_cache);
4162       output_dependency_cache = NULL;
4163       free (anti_dependency_cache);
4164       anti_dependency_cache = NULL;
4165       free (control_dependency_cache);
4166       control_dependency_cache = NULL;
4167 
4168       if (sched_deps_info->generate_spec_deps)
4169         {
4170           free (spec_dependency_cache);
4171           spec_dependency_cache = NULL;
4172         }
4173 
4174     }
4175 }
4176 
4177 /* Initialize some global variables needed by the dependency analysis
4178    code.  */
4179 
4180 void
4181 init_deps_global (void)
4182 {
4183   CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
4184   CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
4185   reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
4186   reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
4187   reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
4188   reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
4189   reg_pending_barrier = NOT_A_BARRIER;
4190 
4191   if (!sel_sched_p () || sched_emulate_haifa_p)
4192     {
4193       sched_deps_info->start_insn = haifa_start_insn;
4194       sched_deps_info->finish_insn = haifa_finish_insn;
4195 
4196       sched_deps_info->note_reg_set = haifa_note_reg_set;
4197       sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
4198       sched_deps_info->note_reg_use = haifa_note_reg_use;
4199 
4200       sched_deps_info->note_mem_dep = haifa_note_mem_dep;
4201       sched_deps_info->note_dep = haifa_note_dep;
4202    }
4203 }
4204 
4205 /* Free everything used by the dependency analysis code.  */
4206 
4207 void
4208 finish_deps_global (void)
4209 {
4210   FREE_REG_SET (reg_pending_sets);
4211   FREE_REG_SET (reg_pending_clobbers);
4212   FREE_REG_SET (reg_pending_uses);
4213   FREE_REG_SET (reg_pending_control_uses);
4214 }
4215 
4216 /* Estimate the weakness of dependence between MEM1 and MEM2.  */
4217 dw_t
4218 estimate_dep_weak (rtx mem1, rtx mem2)
4219 {
4220   if (mem1 == mem2)
4221     /* MEMs are the same - don't speculate.  */
4222     return MIN_DEP_WEAK;
4223 
4224   rtx r1 = XEXP (mem1, 0);
4225   rtx r2 = XEXP (mem2, 0);
4226 
4227   if (sched_deps_info->use_cselib)
4228     {
4229       /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
4230 	 dangling at this point, since we never preserve them.  Instead we
4231 	 canonicalize manually to get stable VALUEs out of hashing.  */
4232       if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
4233 	r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
4234       if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
4235 	r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
4236     }
4237 
4238   if (r1 == r2
4239       || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
4240     /* Again, MEMs are the same.  */
4241     return MIN_DEP_WEAK;
4242   else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
4243     /* Different addressing modes - reason to be more speculative
4244        than usual.  */
4245     return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4246   else
4247     /* We can't say anything about the dependence.  */
4248     return UNCERTAIN_DEP_WEAK;
4249 }
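
/* The returned weakness is normally folded back into a dep_status; e.g.
   ds_full_merge below does, in effect,

     dw = estimate_dep_weak (mem1, mem2);
     ds = set_dep_weak (ds, BEGIN_DATA, dw);

   to record how likely a data-speculative load is to conflict with the
   store it bypasses.  */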
4250 
4251 /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4252    This function can handle same INSN and ELEM (INSN == ELEM).
4253    It is a convenience wrapper.  */
4254 static void
4255 add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4256 {
4257   ds_t ds;
4258   bool internal;
4259 
4260   if (dep_type == REG_DEP_TRUE)
4261     ds = DEP_TRUE;
4262   else if (dep_type == REG_DEP_OUTPUT)
4263     ds = DEP_OUTPUT;
4264   else if (dep_type == REG_DEP_CONTROL)
4265     ds = DEP_CONTROL;
4266   else
4267     {
4268       gcc_assert (dep_type == REG_DEP_ANTI);
4269       ds = DEP_ANTI;
4270     }
4271 
4272   /* When add_dependence is called from inside sched-deps.cc, we expect
4273      cur_insn to be non-null.  */
4274   internal = cur_insn != NULL;
4275   if (internal)
4276     gcc_assert (insn == cur_insn);
4277   else
4278     cur_insn = insn;
4279 
4280   note_dep (elem, ds);
4281   if (!internal)
4282     cur_insn = NULL;
4283 }
4284 
4285 /* Return weakness of speculative type TYPE in the dep_status DS,
4286    without checking to prevent ICEs on malformed input.  */
4287 static dw_t
4288 get_dep_weak_1 (ds_t ds, ds_t type)
4289 {
4290   ds = ds & type;
4291 
4292   switch (type)
4293     {
4294     case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4295     case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4296     case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4297     case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4298     default: gcc_unreachable ();
4299     }
4300 
4301   return (dw_t) ds;
4302 }
4303 
4304 /* Return weakness of speculative type TYPE in the dep_status DS.  */
4305 dw_t
4306 get_dep_weak (ds_t ds, ds_t type)
4307 {
4308   dw_t dw = get_dep_weak_1 (ds, type);
4309 
4310   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4311   return dw;
4312 }
4313 
4314 /* Return the dep_status, which has the same parameters as DS, except for
4315    speculative type TYPE, that will have weakness DW.  */
4316 ds_t
4317 set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4318 {
4319   gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4320 
4321   ds &= ~type;
4322   switch (type)
4323     {
4324     case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4325     case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4326     case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4327     case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4328     default: gcc_unreachable ();
4329     }
4330   return ds;
4331 }
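
/* Each speculative type owns its own group of bits in the dep_status, so
   set_dep_weak and get_dep_weak round-trip; e.g. for any weakness W with
   MIN_DEP_WEAK <= W <= MAX_DEP_WEAK,

     get_dep_weak (set_dep_weak (ds, BEGIN_DATA, W), BEGIN_DATA) == W

   (an illustrative identity, not an exhaustive description of the
   encoding).  */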
4332 
4333 /* Return the join of two dep_statuses DS1 and DS2.
4334    If MAX_P is true then choose the greater probability,
4335    otherwise multiply probabilities.
4336    This function assumes that both DS1 and DS2 contain speculative bits.  */
4337 static ds_t
4338 ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
4339 {
4340   ds_t ds, t;
4341 
4342   gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
4343 
4344   ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
4345 
4346   t = FIRST_SPEC_TYPE;
4347   do
4348     {
4349       if ((ds1 & t) && !(ds2 & t))
4350 	ds |= ds1 & t;
4351       else if (!(ds1 & t) && (ds2 & t))
4352 	ds |= ds2 & t;
4353       else if ((ds1 & t) && (ds2 & t))
4354 	{
4355 	  dw_t dw1 = get_dep_weak (ds1, t);
4356 	  dw_t dw2 = get_dep_weak (ds2, t);
4357 	  ds_t dw;
4358 
4359 	  if (!max_p)
4360 	    {
4361 	      dw = ((ds_t) dw1) * ((ds_t) dw2);
4362 	      dw /= MAX_DEP_WEAK;
4363 	      if (dw < MIN_DEP_WEAK)
4364 		dw = MIN_DEP_WEAK;
4365 	    }
4366 	  else
4367 	    {
4368 	      if (dw1 >= dw2)
4369 		dw = dw1;
4370 	      else
4371 		dw = dw2;
4372 	    }
4373 
4374 	  ds = set_dep_weak (ds, t, (dw_t) dw);
4375 	}
4376 
4377       if (t == LAST_SPEC_TYPE)
4378 	break;
4379       t <<= SPEC_TYPE_SHIFT;
4380     }
4381   while (1);
4382 
4383   return ds;
4384 }
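
/* As a worked example: if DS1 and DS2 both carry BEGIN_DATA with weaknesses
   DW1 and DW2, the merged BEGIN_DATA weakness is DW1 * DW2 / MAX_DEP_WEAK
   (clamped below at MIN_DEP_WEAK) when !MAX_P, treating each weakness as a
   probability scaled by MAX_DEP_WEAK, and MAX (DW1, DW2) when MAX_P.  A
   speculative type present in only one of the two statuses is copied
   unchanged.  */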
4385 
4386 /* Return the join of two dep_statuses DS1 and DS2.
4387    This function assumes that both DS1 and DS2 contain speculative bits.  */
4388 ds_t
4389 ds_merge (ds_t ds1, ds_t ds2)
4390 {
4391   return ds_merge_1 (ds1, ds2, false);
4392 }
4393 
4394 /* Return the join of two dep_statuses DS1 and DS2.  */
4395 ds_t
4396 ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
4397 {
4398   ds_t new_status = ds | ds2;
4399 
4400   if (new_status & SPECULATIVE)
4401     {
4402       if ((ds && !(ds & SPECULATIVE))
4403 	  || (ds2 && !(ds2 & SPECULATIVE)))
4404 	/* Then this dep can't be speculative.  */
4405 	new_status &= ~SPECULATIVE;
4406       else
4407 	{
4408 	  /* Both are speculative.  Merging probabilities.  */
4409 	  if (mem1)
4410 	    {
4411 	      dw_t dw;
4412 
4413 	      dw = estimate_dep_weak (mem1, mem2);
4414 	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
4415 	    }
4416 
4417 	  if (!ds)
4418 	    new_status = ds2;
4419 	  else if (!ds2)
4420 	    new_status = ds;
4421 	  else
4422 	    new_status = ds_merge (ds2, ds);
4423 	}
4424     }
4425 
4426   return new_status;
4427 }
4428 
4429 /* Return the join of DS1 and DS2.  Use maximum instead of multiplying
4430    probabilities.  */
4431 ds_t
4432 ds_max_merge (ds_t ds1, ds_t ds2)
4433 {
4434   if (ds1 == 0 && ds2 == 0)
4435     return 0;
4436 
4437   if (ds1 == 0 && ds2 != 0)
4438     return ds2;
4439 
4440   if (ds1 != 0 && ds2 == 0)
4441     return ds1;
4442 
4443   return ds_merge_1 (ds1, ds2, true);
4444 }
4445 
4446 /* Return the probability of speculation success for the speculation
4447    status DS.  */
4448 dw_t
4449 ds_weak (ds_t ds)
4450 {
4451   ds_t res = 1, dt;
4452   int n = 0;
4453 
4454   dt = FIRST_SPEC_TYPE;
4455   do
4456     {
4457       if (ds & dt)
4458 	{
4459 	  res *= (ds_t) get_dep_weak (ds, dt);
4460 	  n++;
4461 	}
4462 
4463       if (dt == LAST_SPEC_TYPE)
4464 	break;
4465       dt <<= SPEC_TYPE_SHIFT;
4466     }
4467   while (1);
4468 
4469   gcc_assert (n);
4470   while (--n)
4471     res /= MAX_DEP_WEAK;
4472 
4473   if (res < MIN_DEP_WEAK)
4474     res = MIN_DEP_WEAK;
4475 
4476   gcc_assert (res <= MAX_DEP_WEAK);
4477 
4478   return (dw_t) res;
4479 }
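
/* E.g. for a status carrying BEGIN_DATA weakness A and BEGIN_CONTROL
   weakness B, the result is A * B / MAX_DEP_WEAK: the product of the
   individual success probabilities, rescaled back into the
   [MIN_DEP_WEAK, MAX_DEP_WEAK] range.  */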
4480 
4481 /* Return a dep status that contains all speculation types of DS.  */
4482 ds_t
4483 ds_get_speculation_types (ds_t ds)
4484 {
4485   if (ds & BEGIN_DATA)
4486     ds |= BEGIN_DATA;
4487   if (ds & BE_IN_DATA)
4488     ds |= BE_IN_DATA;
4489   if (ds & BEGIN_CONTROL)
4490     ds |= BEGIN_CONTROL;
4491   if (ds & BE_IN_CONTROL)
4492     ds |= BE_IN_CONTROL;
4493 
4494   return ds & SPECULATIVE;
4495 }
4496 
4497 /* Return a dep status that contains maximal weakness for each speculation
4498    type present in DS.  */
4499 ds_t
4500 ds_get_max_dep_weak (ds_t ds)
4501 {
4502   if (ds & BEGIN_DATA)
4503     ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4504   if (ds & BE_IN_DATA)
4505     ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4506   if (ds & BEGIN_CONTROL)
4507     ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4508   if (ds & BE_IN_CONTROL)
4509     ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4510 
4511   return ds;
4512 }
4513 
4514 /* Dump information about the dependence status S.  */
4515 static void
4516 dump_ds (FILE *f, ds_t s)
4517 {
4518   fprintf (f, "{");
4519 
4520   if (s & BEGIN_DATA)
4521     fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4522   if (s & BE_IN_DATA)
4523     fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4524   if (s & BEGIN_CONTROL)
4525     fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4526   if (s & BE_IN_CONTROL)
4527     fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4528 
4529   if (s & HARD_DEP)
4530     fprintf (f, "HARD_DEP; ");
4531 
4532   if (s & DEP_TRUE)
4533     fprintf (f, "DEP_TRUE; ");
4534   if (s & DEP_OUTPUT)
4535     fprintf (f, "DEP_OUTPUT; ");
4536   if (s & DEP_ANTI)
4537     fprintf (f, "DEP_ANTI; ");
4538   if (s & DEP_CONTROL)
4539     fprintf (f, "DEP_CONTROL; ");
4540 
4541   fprintf (f, "}");
4542 }
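
/* Sample output for a data-speculative true dependence might look like
   "{BEGIN_DATA: 3; DEP_TRUE; }", where the number is the recorded weakness
   (the exact value depends on MAX_DEP_WEAK).  */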
4543 
4544 DEBUG_FUNCTION void
4545 debug_ds (ds_t s)
4546 {
4547   dump_ds (stderr, s);
4548   fprintf (stderr, "\n");
4549 }
4550 
4551 /* Verify that dependence type and status are consistent.
4552    If RELAXED_P is true, then skip dep_weakness checks.  */
4553 static void
4554 check_dep (dep_t dep, bool relaxed_p)
4555 {
4556   enum reg_note dt = DEP_TYPE (dep);
4557   ds_t ds = DEP_STATUS (dep);
4558 
4559   gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4560 
4561   if (!(current_sched_info->flags & USE_DEPS_LIST))
4562     {
4563       gcc_assert (ds == 0);
4564       return;
4565     }
4566 
4567   /* Check that dependence type contains the same bits as the status.  */
4568   if (dt == REG_DEP_TRUE)
4569     gcc_assert (ds & DEP_TRUE);
4570   else if (dt == REG_DEP_OUTPUT)
4571     gcc_assert ((ds & DEP_OUTPUT)
4572 		&& !(ds & DEP_TRUE));
4573   else if (dt == REG_DEP_ANTI)
4574     gcc_assert ((ds & DEP_ANTI)
4575 		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));
4576   else
4577     gcc_assert (dt == REG_DEP_CONTROL
4578 		&& (ds & DEP_CONTROL)
4579 		&& !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));
4580 
4581   /* HARD_DEP cannot appear in dep_status of a link.  */
4582   gcc_assert (!(ds & HARD_DEP));
4583 
4584   /* Check that dependence status is set correctly when speculation is not
4585      supported.  */
4586   if (!sched_deps_info->generate_spec_deps)
4587     gcc_assert (!(ds & SPECULATIVE));
4588   else if (ds & SPECULATIVE)
4589     {
4590       if (!relaxed_p)
4591 	{
4592 	  ds_t type = FIRST_SPEC_TYPE;
4593 
4594 	  /* Check that dependence weakness is in proper range.  */
4595 	  do
4596 	    {
4597 	      if (ds & type)
4598 		get_dep_weak (ds, type);
4599 
4600 	      if (type == LAST_SPEC_TYPE)
4601 		break;
4602 	      type <<= SPEC_TYPE_SHIFT;
4603 	    }
4604 	  while (1);
4605 	}
4606 
4607       if (ds & BEGIN_SPEC)
4608 	{
4609 	  /* Only true dependence can be data speculative.  */
4610 	  if (ds & BEGIN_DATA)
4611 	    gcc_assert (ds & DEP_TRUE);
4612 
4613 	  /* Control dependencies in the insn scheduler are represented by
4614 	     anti-dependencies, therefore only anti dependence can be
4615 	     control speculative.  */
4616 	  if (ds & BEGIN_CONTROL)
4617 	    gcc_assert (ds & DEP_ANTI);
4618 	}
4619       else
4620 	{
4621 	  /* Subsequent speculations should resolve true dependencies.  */
4622 	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4623 	}
4624 
4625       /* Check that true and anti dependencies can't have other speculative
4626 	 statuses.  */
4627       if (ds & DEP_TRUE)
4628 	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4629       /* An output dependence can't be speculative at all.  */
4630       gcc_assert (!(ds & DEP_OUTPUT));
4631       if (ds & DEP_ANTI)
4632 	gcc_assert (ds & BEGIN_CONTROL);
4633     }
4634 }
4635 
4636 /* The following code discovers opportunities to switch a memory reference
4637    and an increment by modifying the address.  We ensure that this is done
4638    only for dependencies that are only used to show a single register
4639    dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4640    instruction involved is subject to only one dep that can cause a pattern
4641    change.
4642 
4643    When we discover a suitable dependency, we fill in the dep_replacement
4644    structure to show how to modify the memory reference.  */
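
/* As a sketch of the transformation (C-level pseudocode, not the RTL
   actually matched below): for a pair such as

     x = *(base + 4);      // memory reference, uses base
     base = base + 8;      // increment of base

   the load can be rewritten as x = *(base - 4) and scheduled after the
   increment, since base will already have been bumped by 8 at that point.
   find_mem locates the reference, parse_add_or_inc recognizes the
   increment, and attempt_change validates and records the replacement in
   the dep_replacement structure.  */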
4645 
4646 /* Holds information about a pair of memory reference and register increment
4647    insns which depend on each other, but could possibly be interchanged.  */
4648 struct mem_inc_info
4649 {
4650   rtx_insn *inc_insn;
4651   rtx_insn *mem_insn;
4652 
4653   rtx *mem_loc;
4654   /* A register occurring in the memory address for which we wish to break
4655      the dependence.  This must be identical to the destination register of
4656      the increment.  */
4657   rtx mem_reg0;
4658   /* Any kind of index that is added to that register.  */
4659   rtx mem_index;
4660   /* The constant offset used in the memory address.  */
4661   HOST_WIDE_INT mem_constant;
4662   /* The constant added in the increment insn.  Negated if the increment is
4663      after the memory address.  */
4664   HOST_WIDE_INT inc_constant;
4665   /* The source register used in the increment.  May be different from mem_reg0
4666      if the increment occurs before the memory address.  */
4667   rtx inc_input;
4668 };
4669 
4670 /* Verify that the memory location described in MII can be replaced with
4671    one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
4672    insn remains unchanged by this function.  */
4673 
4674 static rtx
4675 attempt_change (struct mem_inc_info *mii, rtx new_addr)
4676 {
4677   rtx mem = *mii->mem_loc;
4678   rtx new_mem;
4679 
4680   if (!targetm.new_address_profitable_p (mem, mii->mem_insn, new_addr))
4681     return NULL_RTX;
4682 
4683   /* Jump through a lot of hoops to keep the attributes up to date.  We
4684      do not want to call one of the change address variants that take
4685      an offset even though we know the offset in many cases.  These
4686      assume you are changing where the address is pointing by the
4687      offset.  */
4688   new_mem = replace_equiv_address_nv (mem, new_addr);
4689   if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4690     {
4691       if (sched_verbose >= 5)
4692 	fprintf (sched_dump, "validation failure\n");
4693       return NULL_RTX;
4694     }
4695 
4696   /* Put back the old one.  */
4697   validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4698 
4699   return new_mem;
4700 }
4701 
4702 /* Return true if INSN is of the form "a = b op c" where a and b are
4703    regs.  op is + if c is a reg and +|- if c is a const.  Fill in
4704    information in MII about what is found.
4705    BEFORE_MEM indicates whether the increment is found before or after
4706    a corresponding memory reference.  */
4707 
4708 static bool
4709 parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
4710 {
4711   rtx pat = single_set (insn);
4712   rtx src, cst;
4713   bool regs_equal;
4714 
4715   if (RTX_FRAME_RELATED_P (insn) || !pat)
4716     return false;
4717 
4718   /* Do not allow breaking data dependencies for insns that are marked
4719      with REG_STACK_CHECK.  */
4720   if (find_reg_note (insn, REG_STACK_CHECK, NULL))
4721     return false;
4722 
4723   /* Result must be single reg.  */
4724   if (!REG_P (SET_DEST (pat)))
4725     return false;
4726 
4727   if (GET_CODE (SET_SRC (pat)) != PLUS)
4728     return false;
4729 
4730   mii->inc_insn = insn;
4731   src = SET_SRC (pat);
4732   mii->inc_input = XEXP (src, 0);
4733 
4734   if (!REG_P (XEXP (src, 0)))
4735     return false;
4736 
4737   if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
4738     return false;
4739 
4740   cst = XEXP (src, 1);
4741   if (!CONST_INT_P (cst))
4742     return false;
4743   mii->inc_constant = INTVAL (cst);
4744 
4745   regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);
4746 
4747   if (!before_mem)
4748     {
4749       mii->inc_constant = -mii->inc_constant;
4750       if (!regs_equal)
4751 	return false;
4752     }
4753 
4754   if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
4755     {
4756       /* Note that the sign has already been reversed for !before_mem.  */
4757       if (STACK_GROWS_DOWNWARD)
4758 	return mii->inc_constant > 0;
4759       else
4760 	return mii->inc_constant < 0;
4761     }
4762   return true;
4763 }
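
/* E.g. an insn whose pattern is roughly
     (set (reg:SI r) (plus:SI (reg:SI r) (const_int 4)))
   is accepted with inc_constant 4 when it appears before the memory
   reference; when it appears after it (BEFORE_MEM false) the constant is
   recorded negated, so the address arithmetic in find_inc composes the
   same way in both directions.  Illustrative pattern only.  */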
4764 
4765 /* Once a suitable mem reference has been found and the corresponding data
4766    in MII has been filled in, this function is called to find a suitable
4767    add or inc insn involving the register we found in the memory
4768    reference.  */
4769 
4770 static bool
4771 find_inc (struct mem_inc_info *mii, bool backwards)
4772 {
4773   sd_iterator_def sd_it;
4774   dep_t dep;
4775 
4776   sd_it = sd_iterator_start (mii->mem_insn,
4777 			     backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW);
4778   while (sd_iterator_cond (&sd_it, &dep))
4779     {
4780       dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
4781       rtx_insn *pro = DEP_PRO (dep);
4782       rtx_insn *con = DEP_CON (dep);
4783       rtx_insn *inc_cand = backwards ? pro : con;
4784       if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
4785 	goto next;
4786       if (parse_add_or_inc (mii, inc_cand, backwards))
4787 	{
4788 	  struct dep_replacement *desc;
4789 	  df_ref def;
4790 	  rtx newaddr, newmem;
4791 
4792 	  if (sched_verbose >= 5)
4793 	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
4794 		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));
4795 
4796 	  /* Need to ensure that none of the operands of the inc
4797 	     instruction are assigned to by the mem insn.  */
4798 	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
4799 	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
4800 		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
4801 	      {
4802 		if (sched_verbose >= 5)
4803 		  fprintf (sched_dump,
4804 			   "inc conflicts with store failure.\n");
4805 		goto next;
4806 	      }
4807 
4808 	  newaddr = mii->inc_input;
4809 	  if (mii->mem_index != NULL_RTX)
4810 	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
4811 				    mii->mem_index);
4812 	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
4813 				   mii->mem_constant + mii->inc_constant);
4814 	  newmem = attempt_change (mii, newaddr);
4815 	  if (newmem == NULL_RTX)
4816 	    goto next;
4817 	  if (sched_verbose >= 5)
4818 	    fprintf (sched_dump, "successful address replacement\n");
4819 	  desc = XCNEW (struct dep_replacement);
4820 	  DEP_REPLACE (dep) = desc;
4821 	  desc->loc = mii->mem_loc;
4822 	  desc->newval = newmem;
4823 	  desc->orig = *desc->loc;
4824 	  desc->insn = mii->mem_insn;
4825 	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
4826 			 INSN_SPEC_BACK_DEPS (con));
4827 	  if (backwards)
4828 	    {
4829 	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
4830 		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
4831 				  REG_DEP_TRUE);
4832 	    }
4833 	  else
4834 	    {
4835 	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
4836 		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
4837 				  REG_DEP_ANTI);
4838 	    }
4839 	  return true;
4840 	}
4841     next:
4842       sd_iterator_next (&sd_it);
4843     }
4844   return false;
4845 }
4846 
4847 /* A recursive function that walks ADDRESS_OF_X to find memory references
4848    which could be modified during scheduling.  We call find_inc for each
4849    one we find that has a recognizable form.  MII holds information about
4850    the pair of memory/increment instructions.
4851    We ensure that every instruction with a memory reference (which will be
4852    the location of the replacement) is assigned at most one breakable
4853    dependency.  */
4854 
4855 static bool
4856 find_mem (struct mem_inc_info *mii, rtx *address_of_x)
4857 {
4858   rtx x = *address_of_x;
4859   enum rtx_code code = GET_CODE (x);
4860   const char *const fmt = GET_RTX_FORMAT (code);
4861   int i;
4862 
4863   if (code == MEM)
4864     {
4865       rtx reg0 = XEXP (x, 0);
4866 
4867       mii->mem_loc = address_of_x;
4868       mii->mem_index = NULL_RTX;
4869       mii->mem_constant = 0;
4870       if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
4871 	{
4872 	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
4873 	  reg0 = XEXP (reg0, 0);
4874 	}
4875       if (GET_CODE (reg0) == PLUS)
4876 	{
4877 	  mii->mem_index = XEXP (reg0, 1);
4878 	  reg0 = XEXP (reg0, 0);
4879 	}
4880       if (REG_P (reg0))
4881 	{
4882 	  df_ref use;
4883 	  int occurrences = 0;
4884 
4885 	  /* Make sure this reg appears only once in this insn.  Can't use
4886 	     count_occurrences since that only works for pseudos.  */
4887 	  FOR_EACH_INSN_USE (use, mii->mem_insn)
4888 	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
4889 	      if (++occurrences > 1)
4890 		{
4891 		  if (sched_verbose >= 5)
4892 		    fprintf (sched_dump, "mem count failure\n");
4893 		  return false;
4894 		}
4895 
4896 	  mii->mem_reg0 = reg0;
4897 	  return find_inc (mii, true) || find_inc (mii, false);
4898 	}
4899       return false;
4900     }
4901 
4902   if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
4903     {
4904       /* If REG occurs inside a MEM used in a bit-field reference,
4905 	 that is unacceptable.  */
4906       return false;
4907     }
4908 
4909   /* Time for some deep diving.  */
4910   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4911     {
4912       if (fmt[i] == 'e')
4913 	{
4914 	  if (find_mem (mii, &XEXP (x, i)))
4915 	    return true;
4916 	}
4917       else if (fmt[i] == 'E')
4918 	{
4919 	  int j;
4920 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4921 	    if (find_mem (mii, &XVECEXP (x, i, j)))
4922 	      return true;
4923 	}
4924     }
4925   return false;
4926 }
4927 
4928 
4929 /* Examine the instructions between HEAD and TAIL and try to find
4930    dependencies that can be broken by modifying one of the patterns.  */
4931 
4932 void
4933 find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
4934 {
4935   rtx_insn *insn, *next_tail = NEXT_INSN (tail);
4936   int success_in_block = 0;
4937 
4938   for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
4939     {
4940       struct mem_inc_info mii;
4941 
4942       if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
4943 	continue;
4944 
4945       mii.mem_insn = insn;
4946       if (find_mem (&mii, &PATTERN (insn)))
4947 	success_in_block++;
4948     }
4949   if (success_in_block && sched_verbose >= 5)
4950     fprintf (sched_dump, "%d candidates for address modification found.\n",
4951 	     success_in_block);
4952 }
4953 
4954 #endif /* INSN_SCHEDULING */
4955