1 /* Optimize by combining instructions for GNU compiler.
2    Copyright (C) 1987-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21    Portable Optimizer, but redone to work on our list-structured
22    representation for RTL instead of their string representation.
23 
24    The LOG_LINKS of each insn identify the most recent assignment
25    to each REG used in the insn.  It is a list of previous insns,
26    each of which contains a SET for a REG that is used in this insn
27    and not used or set in between.  LOG_LINKs never cross basic blocks.
28    They were set up by the preceding pass (lifetime analysis).
29 
30    We try to combine each pair of insns joined by a logical link.
31    We also try to combine triplets of insns A, B and C when C has
32    a link back to B and B has a link back to A.  Likewise for a
33    small number of quadruplets of insns A, B, C and D for which
34    there's high likelihood of success.
35 
36    LOG_LINKS do not contain entries for uses of CC0.  None are
37    needed, because the insn that sets CC0 is always immediately
38    before the insn that tests it.  So we always regard a branch
39    insn as having a logical link to the preceding insn.  The same is true
40    for an insn explicitly using CC0.
41 
42    We check (with modified_between_p) to avoid combining in such a way
43    as to move a computation to a place where its value would be different.
44 
45    Combination is done by mathematically substituting the previous
46    insn(s) values for the regs they set into the expressions in
47    the later insns that refer to these regs.  If the result is a valid insn
48    for our target machine, according to the machine description,
49    we install it, delete the earlier insns, and update the data flow
50    information (LOG_LINKS and REG_NOTES) for what we did.
51 
52    There are a few exceptions where the dataflow information isn't
53    completely updated (however, this is only a local issue since it is
54    regenerated before the next pass that uses it):
55 
56    - reg_live_length is not updated
57    - reg_n_refs is not adjusted in the rare case when a register is
58      no longer required in a computation
59    - there are extremely rare cases (see distribute_notes) when a
60      REG_DEAD note is lost
61    - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62      removed because there is no way to know which register it was
63      linking
64 
65    To simplify substitution, we combine only when the earlier insn(s)
66    consist of only a single assignment.  To simplify updating afterward,
67    we never combine when a subroutine call appears in the middle.
68 
69    Since we do not represent assignments to CC0 explicitly except when that
70    is all an insn does, there is no LOG_LINKS entry in an insn that uses
71    the condition code for the insn that set the condition code.
72    Fortunately, these two insns must be consecutive.
73    Therefore, every JUMP_INSN is taken to have an implicit logical link
74    to the preceding insn.  This is not quite right, since non-jumps can
75    also use the condition code; but in practice such insns would not
76    combine anyway.  */
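
/* As a purely illustrative sketch (register numbers are hypothetical),
   a typical two-insn combination looks like this:

     I2: (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))
     I3: (set (reg:SI 102) (mult:SI (reg:SI 100) (reg:SI 103)))

   If reg 100 is used nowhere else and dies in I3, substituting I2's
   SET_SRC for reg 100 in I3 yields

     (set (reg:SI 102) (mult:SI (plus:SI (reg:SI 101) (const_int 4))
				(reg:SI 103)))

   which is kept only if the target recognizes the result (for instance
   as a shift-add or multiply-add pattern); otherwise every change is
   undone and the original insns are left alone.  */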
77 
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "expr.h"
103 #include "params.h"
104 #include "tree-pass.h"
105 #include "valtrack.h"
106 #include "rtl-iter.h"
107 #include "print-rtl.h"
108 
109 /* Number of attempts to combine instructions in this function.  */
110 
111 static int combine_attempts;
112 
113 /* Number of attempts that got as far as substitution in this function.  */
114 
115 static int combine_merges;
116 
117 /* Number of instructions combined with added SETs in this function.  */
118 
119 static int combine_extras;
120 
121 /* Number of instructions combined in this function.  */
122 
123 static int combine_successes;
124 
125 /* Totals over entire compilation.  */
126 
127 static int total_attempts, total_merges, total_extras, total_successes;
128 
129 /* combine_instructions may try to replace the right hand side of the
130    second instruction with the value of an associated REG_EQUAL note
131    before throwing it at try_combine.  That is problematic when there
132    is a REG_DEAD note for a register used in the old right hand side
133    and can cause distribute_notes to do wrong things.  This is the
134    second instruction if it has been so modified, null otherwise.  */
135 
136 static rtx_insn *i2mod;
137 
138 /* When I2MOD is nonnull, this is a copy of the old right hand side.  */
139 
140 static rtx i2mod_old_rhs;
141 
142 /* When I2MOD is nonnull, this is a copy of the new right hand side.  */
143 
144 static rtx i2mod_new_rhs;
145 
146 struct reg_stat_type {
147   /* Record last point of death of (hard or pseudo) register n.  */
148   rtx_insn			*last_death;
149 
150   /* Record last point of modification of (hard or pseudo) register n.  */
151   rtx_insn			*last_set;
152 
153   /* The next group of fields allows the recording of the last value assigned
154      to (hard or pseudo) register n.  We use this information to see if an
155      operation being processed is redundant given a prior operation performed
156      on the register.  For example, an `and' with a constant is redundant if
157      all the zero bits are already known to be turned off.
158 
159      We use an approach similar to that used by cse, but change it in the
160      following ways:
161 
162      (1) We do not want to reinitialize at each label.
163      (2) It is useful, but not critical, to know the actual value assigned
164 	 to a register.  Often just its form is helpful.
165 
166      Therefore, we maintain the following fields:
167 
168      last_set_value		the last value assigned
169      last_set_label		records the value of label_tick when the
170 				register was assigned
171      last_set_table_tick	records the value of label_tick when a
172 				value using the register is assigned
173      last_set_invalid		set to nonzero when it is not valid
174 				to use the value of this register in some
175 				register's value
176 
177      To understand the usage of these tables, it is important to understand
178      the distinction between the value in last_set_value being valid and
179      the register being validly contained in some other expression in the
180      table.
181 
182      (The next two parameters are out of date).
183 
184      reg_stat[i].last_set_value is valid if it is nonzero, and either
185      reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186 
187      Register I may validly appear in any expression returned for the value
188      of another register if reg_n_sets[i] is 1.  It may also appear in the
189      value for register J if reg_stat[j].last_set_invalid is zero, or
190      reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191 
192      If an expression is found in the table containing a register which may
193      not validly appear in an expression, the register is replaced by
194      something that won't match, (clobber (const_int 0)).  */
195 
196   /* Record last value assigned to (hard or pseudo) register n.  */
197 
198   rtx				last_set_value;
199 
200   /* Record the value of label_tick when an expression involving register n
201      is placed in last_set_value.  */
202 
203   int				last_set_table_tick;
204 
205   /* Record the value of label_tick when the value for register n is placed in
206      last_set_value.  */
207 
208   int				last_set_label;
209 
210   /* These fields are maintained in parallel with last_set_value and are
211      used to store the mode in which the register was last set, the bits
212      that were known to be zero when it was last set, and the number of
213      sign bit copies it was known to have when it was last set.  */
214 
215   unsigned HOST_WIDE_INT	last_set_nonzero_bits;
216   char				last_set_sign_bit_copies;
217   ENUM_BITFIELD(machine_mode)	last_set_mode : 8;
218 
219   /* Set nonzero if references to register n in expressions should not be
220      used.  last_set_invalid is set nonzero when this register is being
221      assigned to and last_set_table_tick == label_tick.  */
222 
223   char				last_set_invalid;
224 
225   /* Some registers that are set more than once and used in more than one
226      basic block are nevertheless always set in similar ways.  For example,
227      a QImode register may be loaded from memory in two places on a machine
228      where byte loads zero extend.
229 
230      We record in the following fields if a register has some leading bits
231      that are always equal to the sign bit, and what we know about the
232      nonzero bits of a register, specifically which bits are known to be
233      zero.
234 
235      If an entry is zero, it means that we don't know anything special.  */
236 
237   unsigned char			sign_bit_copies;
238 
239   unsigned HOST_WIDE_INT	nonzero_bits;
240 
241   /* Record the value of the label_tick when the last truncation
242      happened.  The field truncated_to_mode is only valid if
243      truncation_label == label_tick.  */
244 
245   int				truncation_label;
246 
247   /* Record the last truncation seen for this register.  If truncation
248      is not a nop to this mode we might be able to save an explicit
249      truncation if we know that value already contains a truncated
250      value.  */
251 
252   ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
253 };
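
/* A hedged illustration (register numbers are hypothetical) of how the
   nonzero_bits field above is used: if every SET of reg 100 has the form

     (set (reg:SI 100) (zero_extend:SI (mem:QI ...)))

   then reg_stat[100].nonzero_bits is 0xff, and a later

     (and:SI (reg:SI 100) (const_int 255))

   is redundant and can be replaced by (reg:SI 100), as described for
   `and' with a constant above.  */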
254 
255 
256 static vec<reg_stat_type> reg_stat;
257 
258 /* One plus the highest pseudo for which we track REG_N_SETS.
259    regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
260    but during combine_split_insns new pseudos can be created.  As we don't have
261    updated DF information in that case, it is hard to initialize the array
262    after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
263    so instead of growing the arrays, just assume all newly created pseudos
264    during combine might be set multiple times.  */
265 
266 static unsigned int reg_n_sets_max;
267 
268 /* Record the luid of the last insn that invalidated memory
269    (anything that writes memory, and subroutine calls, but not pushes).  */
270 
271 static int mem_last_set;
272 
273 /* Record the luid of the last CALL_INSN
274    so we can tell whether a potential combination crosses any calls.  */
275 
276 static int last_call_luid;
277 
278 /* When `subst' is called, this is the insn that is being modified
279    (by combining in a previous insn).  The PATTERN of this insn
280    is still the old pattern partially modified and it should not be
281    looked at, but this may be used to examine the successors of the insn
282    to judge whether a simplification is valid.  */
283 
284 static rtx_insn *subst_insn;
285 
286 /* This is the lowest LUID that `subst' is currently dealing with.
287    get_last_value will not return a value if the register was set at or
288    after this LUID.  If not for this mechanism, we could get confused if
289    I2 or I1 in try_combine were an insn that used the old value of a register
290    to obtain a new value.  In that case, we might erroneously get the
291    new value of the register when we wanted the old one.  */
292 
293 static int subst_low_luid;
294 
295 /* This contains any hard registers that are used in newpat; reg_dead_at_p
296    must consider all these registers to be always live.  */
297 
298 static HARD_REG_SET newpat_used_regs;
299 
300 /* This is an insn to which a LOG_LINKS entry has been added.  If this
301    insn is earlier than I2 or I3, combine should rescan starting at
302    that location.  */
303 
304 static rtx_insn *added_links_insn;
305 
306 /* And similarly, for notes.  */
307 
308 static rtx_insn *added_notes_insn;
309 
310 /* Basic block in which we are performing combines.  */
311 static basic_block this_basic_block;
312 static bool optimize_this_for_speed_p;
313 
314 
315 /* Length of the currently allocated uid_insn_cost array.  */
316 
317 static int max_uid_known;
318 
319 /* The following array records the insn_cost for every insn
320    in the instruction stream.  */
321 
322 static int *uid_insn_cost;
323 
324 /* The following array records the LOG_LINKS for every insn in the
325    instruction stream as struct insn_link pointers.  */
326 
327 struct insn_link {
328   rtx_insn *insn;
329   unsigned int regno;
330   struct insn_link *next;
331 };
332 
333 static struct insn_link **uid_log_links;
334 
335 static inline int
336 insn_uid_check (const_rtx insn)
337 {
338   int uid = INSN_UID (insn);
339   gcc_checking_assert (uid <= max_uid_known);
340   return uid;
341 }
342 
343 #define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
344 #define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])
345 
346 #define FOR_EACH_LOG_LINK(L, INSN)				\
347   for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
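
/* Typical use of the accessor above, following the pattern that recurs
   later in this file (see e.g. find_single_use and create_log_links):

     struct insn_link *links;
     FOR_EACH_LOG_LINK (links, insn)
       if (links->insn == other_insn && links->regno == REGNO (dest))
	 break;

   where OTHER_INSN and DEST stand for whatever insn and register the
   caller is interested in.  */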
348 
349 /* Links for LOG_LINKS are allocated from this obstack.  */
350 
351 static struct obstack insn_link_obstack;
352 
353 /* Allocate a link.  */
354 
355 static inline struct insn_link *
356 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
357 {
358   struct insn_link *l
359     = (struct insn_link *) obstack_alloc (&insn_link_obstack,
360 					  sizeof (struct insn_link));
361   l->insn = insn;
362   l->regno = regno;
363   l->next = next;
364   return l;
365 }
366 
367 /* Incremented for each basic block.  */
368 
369 static int label_tick;
370 
371 /* Reset to label_tick for each extended basic block in scanning order.  */
372 
373 static int label_tick_ebb_start;
374 
375 /* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
376    largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */
377 
378 static scalar_int_mode nonzero_bits_mode;
379 
380 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
381    be safely used.  It is zero while computing them and after combine has
382    completed.  The former case prevents propagating values based on
383    previously set values, which can be incorrect if a variable is modified
384    in a loop.  */
385 
386 static int nonzero_sign_valid;
387 
388 
389 /* Record one modification to rtl structure
390    to be undone by storing old_contents into *where.  */
391 
392 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
393 
394 struct undo
395 {
396   struct undo *next;
397   enum undo_kind kind;
398   union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
399   union { rtx *r; int *i; struct insn_link **l; } where;
400 };
401 
402 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
403    num_undo says how many are currently recorded.
404 
405    other_insn is nonzero if we have modified some other insn in the process
406    of working on subst_insn.  It must be verified too.  */
407 
408 struct undobuf
409 {
410   struct undo *undos;
411   struct undo *frees;
412   rtx_insn *other_insn;
413 };
414 
415 static struct undobuf undobuf;
416 
417 /* Number of times the pseudo being substituted for
418    was found and replaced.  */
419 
420 static int n_occurrences;
421 
422 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
423 					 scalar_int_mode,
424 					 unsigned HOST_WIDE_INT *);
425 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
426 						scalar_int_mode,
427 						unsigned int *);
428 static void do_SUBST (rtx *, rtx);
429 static void do_SUBST_INT (int *, int);
430 static void init_reg_last (void);
431 static void setup_incoming_promotions (rtx_insn *);
432 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
433 static int cant_combine_insn_p (rtx_insn *);
434 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
435 			  rtx_insn *, rtx_insn *, rtx *, rtx *);
436 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
437 static int contains_muldiv (rtx);
438 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
439 			      int *, rtx_insn *);
440 static void undo_all (void);
441 static void undo_commit (void);
442 static rtx *find_split_point (rtx *, rtx_insn *, bool);
443 static rtx subst (rtx, rtx, rtx, int, int, int);
444 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
445 static rtx simplify_if_then_else (rtx);
446 static rtx simplify_set (rtx);
447 static rtx simplify_logical (rtx);
448 static rtx expand_compound_operation (rtx);
449 static const_rtx expand_field_assignment (const_rtx);
450 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
451 			    rtx, unsigned HOST_WIDE_INT, int, int, int);
452 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
453 			      unsigned HOST_WIDE_INT *);
454 static rtx canon_reg_for_combine (rtx, rtx);
455 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
456 			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
457 static rtx force_to_mode (rtx, machine_mode,
458 			  unsigned HOST_WIDE_INT, int);
459 static rtx if_then_else_cond (rtx, rtx *, rtx *);
460 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
461 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
462 static rtx make_field_assignment (rtx);
463 static rtx apply_distributive_law (rtx);
464 static rtx distribute_and_simplify_rtx (rtx, int);
465 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
466 				     unsigned HOST_WIDE_INT);
467 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
468 				   unsigned HOST_WIDE_INT);
469 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
470 			    HOST_WIDE_INT, machine_mode, int *);
471 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
472 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
473 				 int);
474 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
475 static rtx gen_lowpart_for_combine (machine_mode, rtx);
476 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
477 					     rtx, rtx *);
478 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
479 static void update_table_tick (rtx);
480 static void record_value_for_reg (rtx, rtx_insn *, rtx);
481 static void check_promoted_subreg (rtx_insn *, rtx);
482 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
483 static void record_dead_and_set_regs (rtx_insn *);
484 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
485 static rtx get_last_value (const_rtx);
486 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
487 static int reg_dead_at_p (rtx, rtx_insn *);
488 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
489 static int reg_bitfield_target_p (rtx, rtx);
490 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
491 static void distribute_links (struct insn_link *);
492 static void mark_used_regs_combine (rtx);
493 static void record_promoted_value (rtx_insn *, rtx);
494 static bool unmentioned_reg_p (rtx, rtx);
495 static void record_truncated_values (rtx *, void *);
496 static bool reg_truncated_to_mode (machine_mode, const_rtx);
497 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
498 
499 
500 /* It is not safe to use ordinary gen_lowpart in combine.
501    See comments in gen_lowpart_for_combine.  */
502 #undef RTL_HOOKS_GEN_LOWPART
503 #define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine
504 
505 /* Our implementation of gen_lowpart never emits a new pseudo.  */
506 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
507 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine
508 
509 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
510 #define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine
511 
512 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
513 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine
514 
515 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
516 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode
517 
518 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
519 
520 
521 /* Convenience wrapper for the canonicalize_comparison target hook.
522    Target hooks cannot use enum rtx_code.  */
523 static inline void
524 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
525 				bool op0_preserve_value)
526 {
527   int code_int = (int)*code;
528   targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
529   *code = (enum rtx_code)code_int;
530 }
531 
532 /* Try to split PATTERN found in INSN.  This returns NULL_RTX if
533    PATTERN cannot be split.  Otherwise, it returns an insn sequence.
534    This is a wrapper around split_insns which ensures that the
535    reg_stat vector is made larger if the splitter creates a new
536    register.  */
537 
538 static rtx_insn *
539 combine_split_insns (rtx pattern, rtx_insn *insn)
540 {
541   rtx_insn *ret;
542   unsigned int nregs;
543 
544   ret = split_insns (pattern, insn);
545   nregs = max_reg_num ();
546   if (nregs > reg_stat.length ())
547     reg_stat.safe_grow_cleared (nregs);
548   return ret;
549 }
550 
551 /* This is used by find_single_use to locate an rtx in LOC that
552    contains exactly one use of DEST, which is typically either a REG
553    or CC0.  It returns a pointer to the innermost rtx expression
554    containing DEST.  Appearances of DEST that are being used to
555    totally replace it are not counted.  */
556 
557 static rtx *
558 find_single_use_1 (rtx dest, rtx *loc)
559 {
560   rtx x = *loc;
561   enum rtx_code code = GET_CODE (x);
562   rtx *result = NULL;
563   rtx *this_result;
564   int i;
565   const char *fmt;
566 
567   switch (code)
568     {
569     case CONST:
570     case LABEL_REF:
571     case SYMBOL_REF:
572     CASE_CONST_ANY:
573     case CLOBBER:
574     case CLOBBER_HIGH:
575       return 0;
576 
577     case SET:
578       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
579 	 of a REG that occupies all of the REG, the insn uses DEST if
580 	 it is mentioned in the destination or the source.  Otherwise, we
581 	 need just check the source.  */
582       if (GET_CODE (SET_DEST (x)) != CC0
583 	  && GET_CODE (SET_DEST (x)) != PC
584 	  && !REG_P (SET_DEST (x))
585 	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
586 		&& REG_P (SUBREG_REG (SET_DEST (x)))
587 		&& !read_modify_subreg_p (SET_DEST (x))))
588 	break;
589 
590       return find_single_use_1 (dest, &SET_SRC (x));
591 
592     case MEM:
593     case SUBREG:
594       return find_single_use_1 (dest, &XEXP (x, 0));
595 
596     default:
597       break;
598     }
599 
600   /* If it wasn't one of the common cases above, check each expression and
601      vector of this code.  Look for a unique usage of DEST.  */
602 
603   fmt = GET_RTX_FORMAT (code);
604   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
605     {
606       if (fmt[i] == 'e')
607 	{
608 	  if (dest == XEXP (x, i)
609 	      || (REG_P (dest) && REG_P (XEXP (x, i))
610 		  && REGNO (dest) == REGNO (XEXP (x, i))))
611 	    this_result = loc;
612 	  else
613 	    this_result = find_single_use_1 (dest, &XEXP (x, i));
614 
615 	  if (result == NULL)
616 	    result = this_result;
617 	  else if (this_result)
618 	    /* Duplicate usage.  */
619 	    return NULL;
620 	}
621       else if (fmt[i] == 'E')
622 	{
623 	  int j;
624 
625 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
626 	    {
627 	      if (XVECEXP (x, i, j) == dest
628 		  || (REG_P (dest)
629 		      && REG_P (XVECEXP (x, i, j))
630 		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
631 		this_result = loc;
632 	      else
633 		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
634 
635 	      if (result == NULL)
636 		result = this_result;
637 	      else if (this_result)
638 		return NULL;
639 	    }
640 	}
641     }
642 
643   return result;
644 }
645 
646 
647 /* See if DEST, produced in INSN, is used only a single time in the
648    sequel.  If so, return a pointer to the innermost rtx expression in which
649    it is used.
650 
651    If PLOC is nonzero, *PLOC is set to the insn containing the single use.
652 
653    If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
654    care about REG_DEAD notes or LOG_LINKS.
655 
656    Otherwise, we find the single use by finding an insn that has a
657    LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
658    only referenced once in that insn, we know that it must be the first
659    and last insn referencing DEST.  */
660 
661 static rtx *
662 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
663 {
664   basic_block bb;
665   rtx_insn *next;
666   rtx *result;
667   struct insn_link *link;
668 
669   if (dest == cc0_rtx)
670     {
671       next = NEXT_INSN (insn);
672       if (next == 0
673 	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
674 	return 0;
675 
676       result = find_single_use_1 (dest, &PATTERN (next));
677       if (result && ploc)
678 	*ploc = next;
679       return result;
680     }
681 
682   if (!REG_P (dest))
683     return 0;
684 
685   bb = BLOCK_FOR_INSN (insn);
686   for (next = NEXT_INSN (insn);
687        next && BLOCK_FOR_INSN (next) == bb;
688        next = NEXT_INSN (next))
689     if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
690       {
691 	FOR_EACH_LOG_LINK (link, next)
692 	  if (link->insn == insn && link->regno == REGNO (dest))
693 	    break;
694 
695 	if (link)
696 	  {
697 	    result = find_single_use_1 (dest, &PATTERN (next));
698 	    if (ploc)
699 	      *ploc = next;
700 	    return result;
701 	  }
702       }
703 
704   return 0;
705 }
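
/* An illustrative (hypothetical) case for find_single_use: DEST is set in
   INSN and referenced exactly once afterwards, in an insn where it dies:

     INSN: (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
     NEXT: (set (reg:SI 103) (neg:SI (reg:SI 100)))
	   with a REG_DEAD note and a LOG_LINK back to INSN

   Here find_single_use returns &SET_SRC (PATTERN (NEXT)), the location of
   the NEG, i.e. the innermost expression containing the single use, and
   sets *PLOC to NEXT.  */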
706 
707 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
708    insn.  The substitution can be undone by undo_all.  If INTO is already
709    set to NEWVAL, do not record this change.  Because computing NEWVAL might
710    also call SUBST, we have to compute it before we put anything into
711    the undo table.  */
712 
713 static void
714 do_SUBST (rtx *into, rtx newval)
715 {
716   struct undo *buf;
717   rtx oldval = *into;
718 
719   if (oldval == newval)
720     return;
721 
722   /* We'd like to catch as many invalid transformations here as
723      possible.  Unfortunately, there are way too many mode changes
724      that are perfectly valid, so we'd waste too much effort for
725      little gain doing the checks here.  Focus on catching invalid
726      transformations involving integer constants.  */
727   if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
728       && CONST_INT_P (newval))
729     {
730       /* Sanity check that we're replacing oldval with a CONST_INT
731 	 that is a valid sign-extension for the original mode.  */
732       gcc_assert (INTVAL (newval)
733 		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
734 
735       /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
736 	 CONST_INT is not valid, because after the replacement, the
737 	 original mode would be gone.  Unfortunately, we can't tell
738 	 when do_SUBST is called to replace the operand thereof, so we
739 	 perform this test on oldval instead, checking whether an
740 	 invalid replacement took place before we got here.  */
741       gcc_assert (!(GET_CODE (oldval) == SUBREG
742 		    && CONST_INT_P (SUBREG_REG (oldval))));
743       gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
744 		    && CONST_INT_P (XEXP (oldval, 0))));
745     }
746 
747   if (undobuf.frees)
748     buf = undobuf.frees, undobuf.frees = buf->next;
749   else
750     buf = XNEW (struct undo);
751 
752   buf->kind = UNDO_RTX;
753   buf->where.r = into;
754   buf->old_contents.r = oldval;
755   *into = newval;
756 
757   buf->next = undobuf.undos, undobuf.undos = buf;
758 }
759 
760 #define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
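
/* Sketch (not a verbatim excerpt from try_combine) of how SUBST pairs with
   undo_all: every change made through SUBST and its siblings is recorded in
   undobuf, so an attempted combination whose result is not recognized can
   be backed out in full:

     SUBST (SET_SRC (newpat), simplified_src);
     ...
     if (recog_for_combine (&newpat, i3, &new_notes) < 0)
       undo_all ();

   (simplified_src and new_notes are placeholder names.)  */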
761 
762 /* Similar to SUBST, but NEWVAL is an int expression.  Note that substituting
763    for a HOST_WIDE_INT value (including CONST_INT) is
764    not safe.  */
765 
766 static void
767 do_SUBST_INT (int *into, int newval)
768 {
769   struct undo *buf;
770   int oldval = *into;
771 
772   if (oldval == newval)
773     return;
774 
775   if (undobuf.frees)
776     buf = undobuf.frees, undobuf.frees = buf->next;
777   else
778     buf = XNEW (struct undo);
779 
780   buf->kind = UNDO_INT;
781   buf->where.i = into;
782   buf->old_contents.i = oldval;
783   *into = newval;
784 
785   buf->next = undobuf.undos, undobuf.undos = buf;
786 }
787 
788 #define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
789 
790 /* Similar to SUBST, but just substitute the mode.  This is used when
791    changing the mode of a pseudo-register, so that any other
792    references to the entry in the regno_reg_rtx array will change as
793    well.  */
794 
795 static void
796 do_SUBST_MODE (rtx *into, machine_mode newval)
797 {
798   struct undo *buf;
799   machine_mode oldval = GET_MODE (*into);
800 
801   if (oldval == newval)
802     return;
803 
804   if (undobuf.frees)
805     buf = undobuf.frees, undobuf.frees = buf->next;
806   else
807     buf = XNEW (struct undo);
808 
809   buf->kind = UNDO_MODE;
810   buf->where.r = into;
811   buf->old_contents.m = oldval;
812   adjust_reg_mode (*into, newval);
813 
814   buf->next = undobuf.undos, undobuf.undos = buf;
815 }
816 
817 #define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
818 
819 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */
820 
821 static void
822 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
823 {
824   struct undo *buf;
825   struct insn_link * oldval = *into;
826 
827   if (oldval == newval)
828     return;
829 
830   if (undobuf.frees)
831     buf = undobuf.frees, undobuf.frees = buf->next;
832   else
833     buf = XNEW (struct undo);
834 
835   buf->kind = UNDO_LINKS;
836   buf->where.l = into;
837   buf->old_contents.l = oldval;
838   *into = newval;
839 
840   buf->next = undobuf.undos, undobuf.undos = buf;
841 }
842 
843 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
844 
845 /* Subroutine of try_combine.  Determine whether the replacement patterns
846    NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
847    than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
848    that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
849    undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
850    of all the instructions can be estimated and the replacements are more
851    expensive than the original sequence.  */
852 
853 static bool
854 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
855 		       rtx newpat, rtx newi2pat, rtx newotherpat)
856 {
857   int i0_cost, i1_cost, i2_cost, i3_cost;
858   int new_i2_cost, new_i3_cost;
859   int old_cost, new_cost;
860 
861   /* Lookup the original insn_costs.  */
862   i2_cost = INSN_COST (i2);
863   i3_cost = INSN_COST (i3);
864 
865   if (i1)
866     {
867       i1_cost = INSN_COST (i1);
868       if (i0)
869 	{
870 	  i0_cost = INSN_COST (i0);
871 	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
872 		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
873 	}
874       else
875 	{
876 	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
877 		      ? i1_cost + i2_cost + i3_cost : 0);
878 	  i0_cost = 0;
879 	}
880     }
881   else
882     {
883       old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
884       i1_cost = i0_cost = 0;
885     }
886 
887   /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
888      correct that.  */
889   if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
890     old_cost -= i1_cost;
891 
892 
893   /* Calculate the replacement insn_costs.  */
894   rtx tmp = PATTERN (i3);
895   PATTERN (i3) = newpat;
896   int tmpi = INSN_CODE (i3);
897   INSN_CODE (i3) = -1;
898   new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
899   PATTERN (i3) = tmp;
900   INSN_CODE (i3) = tmpi;
901   if (newi2pat)
902     {
903       tmp = PATTERN (i2);
904       PATTERN (i2) = newi2pat;
905       tmpi = INSN_CODE (i2);
906       INSN_CODE (i2) = -1;
907       new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
908       PATTERN (i2) = tmp;
909       INSN_CODE (i2) = tmpi;
910       new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
911 		 ? new_i2_cost + new_i3_cost : 0;
912     }
913   else
914     {
915       new_cost = new_i3_cost;
916       new_i2_cost = 0;
917     }
918 
919   if (undobuf.other_insn)
920     {
921       int old_other_cost, new_other_cost;
922 
923       old_other_cost = INSN_COST (undobuf.other_insn);
924       tmp = PATTERN (undobuf.other_insn);
925       PATTERN (undobuf.other_insn) = newotherpat;
926       tmpi = INSN_CODE (undobuf.other_insn);
927       INSN_CODE (undobuf.other_insn) = -1;
928       new_other_cost = insn_cost (undobuf.other_insn,
929 				  optimize_this_for_speed_p);
930       PATTERN (undobuf.other_insn) = tmp;
931       INSN_CODE (undobuf.other_insn) = tmpi;
932       if (old_other_cost > 0 && new_other_cost > 0)
933 	{
934 	  old_cost += old_other_cost;
935 	  new_cost += new_other_cost;
936 	}
937       else
938 	old_cost = 0;
939     }
940 
941   /* Disallow this combination if both new_cost and old_cost are greater than
942      zero, and new_cost is greater than old_cost.  */
943   int reject = old_cost > 0 && new_cost > old_cost;
944 
945   if (dump_file)
946     {
947       fprintf (dump_file, "%s combination of insns ",
948 	       reject ? "rejecting" : "allowing");
949       if (i0)
950 	fprintf (dump_file, "%d, ", INSN_UID (i0));
951       if (i1 && INSN_UID (i1) != INSN_UID (i2))
952 	fprintf (dump_file, "%d, ", INSN_UID (i1));
953       fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
954 
955       fprintf (dump_file, "original costs ");
956       if (i0)
957 	fprintf (dump_file, "%d + ", i0_cost);
958       if (i1 && INSN_UID (i1) != INSN_UID (i2))
959 	fprintf (dump_file, "%d + ", i1_cost);
960       fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
961 
962       if (newi2pat)
963 	fprintf (dump_file, "replacement costs %d + %d = %d\n",
964 		 new_i2_cost, new_i3_cost, new_cost);
965       else
966 	fprintf (dump_file, "replacement cost %d\n", new_cost);
967     }
968 
969   if (reject)
970     return false;
971 
972   /* Update the uid_insn_cost array with the replacement costs.  */
973   INSN_COST (i2) = new_i2_cost;
974   INSN_COST (i3) = new_i3_cost;
975   if (i1)
976     {
977       INSN_COST (i1) = 0;
978       if (i0)
979 	INSN_COST (i0) = 0;
980     }
981 
982   return true;
983 }
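
/* Worked example with made-up costs: if I2 costs 4 and I3 costs 8, the
   original sequence costs 12.  A replacement consisting of a single new
   I3 pattern of cost 10 (and no NEWI2PAT) is accepted, since 10 <= 12;
   a replacement of cost 13 would be rejected, because both costs are
   known and the new cost exceeds the old one.  */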
984 
985 
986 /* Delete any insns that copy a register to itself.
987    Return true if the CFG was changed.  */
988 
989 static bool
990 delete_noop_moves (void)
991 {
992   rtx_insn *insn, *next;
993   basic_block bb;
994 
995   bool edges_deleted = false;
996 
997   FOR_EACH_BB_FN (bb, cfun)
998     {
999       for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
1000 	{
1001 	  next = NEXT_INSN (insn);
1002 	  if (INSN_P (insn) && noop_move_p (insn))
1003 	    {
1004 	      if (dump_file)
1005 		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
1006 
1007 	      edges_deleted |= delete_insn_and_edges (insn);
1008 	    }
1009 	}
1010     }
1011 
1012   return edges_deleted;
1013 }
1014 
1015 
1016 /* Return false if we do not want to (or cannot) combine DEF.  */
1017 static bool
1018 can_combine_def_p (df_ref def)
1019 {
1020   /* Do not consider the def if it is a pre/post modification in a MEM.  */
1021   if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1022     return false;
1023 
1024   unsigned int regno = DF_REF_REGNO (def);
1025 
1026   /* Do not combine frame pointer adjustments.  */
1027   if ((regno == FRAME_POINTER_REGNUM
1028        && (!reload_completed || frame_pointer_needed))
1029       || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1030 	  && regno == HARD_FRAME_POINTER_REGNUM
1031 	  && (!reload_completed || frame_pointer_needed))
1032       || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1033 	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1034     return false;
1035 
1036   return true;
1037 }
1038 
1039 /* Return false if we do not want to (or cannot) combine USE.  */
1040 static bool
1041 can_combine_use_p (df_ref use)
1042 {
1043   /* Do not consider the use of the stack pointer by a function call.  */
1044   if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1045     return false;
1046 
1047   return true;
1048 }
1049 
1050 /* Fill in log links field for all insns.  */
1051 
1052 static void
1053 create_log_links (void)
1054 {
1055   basic_block bb;
1056   rtx_insn **next_use;
1057   rtx_insn *insn;
1058   df_ref def, use;
1059 
1060   next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1061 
1062   /* Pass through each block from the end, recording the uses of each
1063      register and establishing log links when def is encountered.
1064      Note that we do not clear next_use array in order to save time,
1065      so we have to test whether the use is in the same basic block as def.
1066 
1067      There are a few cases below when we do not consider the definition or
1068     usage -- these are taken from what the original flow.c did.  Don't ask me
1069     why it is done this way; I don't know, and if it works, I don't want to know.  */
1070 
1071   FOR_EACH_BB_FN (bb, cfun)
1072     {
1073       FOR_BB_INSNS_REVERSE (bb, insn)
1074         {
1075           if (!NONDEBUG_INSN_P (insn))
1076             continue;
1077 
1078 	  /* Log links are created only once.  */
1079 	  gcc_assert (!LOG_LINKS (insn));
1080 
1081 	  FOR_EACH_INSN_DEF (def, insn)
1082             {
1083               unsigned int regno = DF_REF_REGNO (def);
1084               rtx_insn *use_insn;
1085 
1086               if (!next_use[regno])
1087                 continue;
1088 
1089 	      if (!can_combine_def_p (def))
1090 		continue;
1091 
1092 	      use_insn = next_use[regno];
1093 	      next_use[regno] = NULL;
1094 
1095 	      if (BLOCK_FOR_INSN (use_insn) != bb)
1096 		continue;
1097 
1098 	      /* flow.c claimed:
1099 
1100 		 We don't build a LOG_LINK for hard registers contained
1101 		 in ASM_OPERANDs.  If these registers get replaced,
1102 		 we might wind up changing the semantics of the insn,
1103 		 even if reload can make what appear to be valid
1104 		 assignments later.  */
1105 	      if (regno < FIRST_PSEUDO_REGISTER
1106 		  && asm_noperands (PATTERN (use_insn)) >= 0)
1107 		continue;
1108 
1109 	      /* Don't add duplicate links between instructions.  */
1110 	      struct insn_link *links;
1111 	      FOR_EACH_LOG_LINK (links, use_insn)
1112 	        if (insn == links->insn && regno == links->regno)
1113 		  break;
1114 
1115 	      if (!links)
1116 		LOG_LINKS (use_insn)
1117 		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1118             }
1119 
1120 	  FOR_EACH_INSN_USE (use, insn)
1121 	    if (can_combine_use_p (use))
1122 	      next_use[DF_REF_REGNO (use)] = insn;
1123         }
1124     }
1125 
1126   free (next_use);
1127 }
1128 
1129 /* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
1130    true if we found a LOG_LINK that proves that A feeds B.  This only works
1131    if there are no instructions between A and B which could have a link
1132    depending on A, since in that case we would not record a link for B.
1133    We also check the implicit dependency created by a cc0 setter/user
1134    pair.  */
1135 
1136 static bool
1137 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1138 {
1139   struct insn_link *links;
1140   FOR_EACH_LOG_LINK (links, b)
1141     if (links->insn == a)
1142       return true;
1143   if (HAVE_cc0 && sets_cc0_p (a))
1144     return true;
1145   return false;
1146 }
1147 
1148 /* Main entry point for combiner.  F is the first insn of the function.
1149    NREGS is the first unused pseudo-reg number.
1150 
1151    Return nonzero if the CFG was changed (e.g. if the combiner has
1152    turned an indirect jump instruction into a direct jump).  */
1153 static int
1154 combine_instructions (rtx_insn *f, unsigned int nregs)
1155 {
1156   rtx_insn *insn, *next;
1157   rtx_insn *prev;
1158   struct insn_link *links, *nextlinks;
1159   rtx_insn *first;
1160   basic_block last_bb;
1161 
1162   int new_direct_jump_p = 0;
1163 
1164   for (first = f; first && !NONDEBUG_INSN_P (first); )
1165     first = NEXT_INSN (first);
1166   if (!first)
1167     return 0;
1168 
1169   combine_attempts = 0;
1170   combine_merges = 0;
1171   combine_extras = 0;
1172   combine_successes = 0;
1173 
1174   rtl_hooks = combine_rtl_hooks;
1175 
1176   reg_stat.safe_grow_cleared (nregs);
1177 
1178   init_recog_no_volatile ();
1179 
1180   /* Allocate array for insn info.  */
1181   max_uid_known = get_max_uid ();
1182   uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1183   uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1184   gcc_obstack_init (&insn_link_obstack);
1185 
1186   nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1187 
1188   /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
1189      problems when, for example, we have j <<= 1 in a loop.  */
1190 
1191   nonzero_sign_valid = 0;
1192   label_tick = label_tick_ebb_start = 1;
1193 
1194   /* Scan all SETs and see if we can deduce anything about what
1195      bits are known to be zero for some registers and how many copies
1196      of the sign bit are known to exist for those registers.
1197 
1198      Also set any known values so that we can use it while searching
1199      for what bits are known to be set.  */
1200 
1201   setup_incoming_promotions (first);
1202   /* Allow the entry block and the first block to fall into the same EBB.
1203      Conceptually the incoming promotions are assigned to the entry block.  */
1204   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1205 
1206   create_log_links ();
1207   FOR_EACH_BB_FN (this_basic_block, cfun)
1208     {
1209       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1210       last_call_luid = 0;
1211       mem_last_set = -1;
1212 
1213       label_tick++;
1214       if (!single_pred_p (this_basic_block)
1215 	  || single_pred (this_basic_block) != last_bb)
1216 	label_tick_ebb_start = label_tick;
1217       last_bb = this_basic_block;
1218 
1219       FOR_BB_INSNS (this_basic_block, insn)
1220         if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1221 	  {
1222             rtx links;
1223 
1224             subst_low_luid = DF_INSN_LUID (insn);
1225             subst_insn = insn;
1226 
1227 	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1228 		         insn);
1229 	    record_dead_and_set_regs (insn);
1230 
1231 	    if (AUTO_INC_DEC)
1232 	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1233 		if (REG_NOTE_KIND (links) == REG_INC)
1234 		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1235 						    insn);
1236 
1237 	    /* Record the current insn_cost of this instruction.  */
1238 	    if (NONJUMP_INSN_P (insn))
1239 	      INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1240 	    if (dump_file)
1241 	      {
1242 		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1243 		dump_insn_slim (dump_file, insn);
1244 	      }
1245 	  }
1246     }
1247 
1248   nonzero_sign_valid = 1;
1249 
1250   /* Now scan all the insns in forward order.  */
1251   label_tick = label_tick_ebb_start = 1;
1252   init_reg_last ();
1253   setup_incoming_promotions (first);
1254   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1255   int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1256 
1257   FOR_EACH_BB_FN (this_basic_block, cfun)
1258     {
1259       rtx_insn *last_combined_insn = NULL;
1260 
1261       /* Ignore instruction combination in basic blocks that are going to
1262 	 be removed as unreachable anyway.  See PR82386.  */
1263       if (EDGE_COUNT (this_basic_block->preds) == 0)
1264 	continue;
1265 
1266       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1267       last_call_luid = 0;
1268       mem_last_set = -1;
1269 
1270       label_tick++;
1271       if (!single_pred_p (this_basic_block)
1272 	  || single_pred (this_basic_block) != last_bb)
1273 	label_tick_ebb_start = label_tick;
1274       last_bb = this_basic_block;
1275 
1276       rtl_profile_for_bb (this_basic_block);
1277       for (insn = BB_HEAD (this_basic_block);
1278 	   insn != NEXT_INSN (BB_END (this_basic_block));
1279 	   insn = next ? next : NEXT_INSN (insn))
1280 	{
1281 	  next = 0;
1282 	  if (!NONDEBUG_INSN_P (insn))
1283 	    continue;
1284 
1285 	  while (last_combined_insn
1286 		 && (!NONDEBUG_INSN_P (last_combined_insn)
1287 		     || last_combined_insn->deleted ()))
1288 	    last_combined_insn = PREV_INSN (last_combined_insn);
1289 	  if (last_combined_insn == NULL_RTX
1290 	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1291 	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1292 	    last_combined_insn = insn;
1293 
1294 	  /* See if we know about function return values before this
1295 	     insn based upon SUBREG flags.  */
1296 	  check_promoted_subreg (insn, PATTERN (insn));
1297 
1298 	  /* See if we can find hard regs and subregs of pseudos in
1299 	     narrower modes.  This could help turn TRUNCATEs
1300 	     into SUBREGs.  */
1301 	  note_uses (&PATTERN (insn), record_truncated_values, NULL);
1302 
1303 	  /* Try this insn with each insn it links back to.  */
1304 
1305 	  FOR_EACH_LOG_LINK (links, insn)
1306 	    if ((next = try_combine (insn, links->insn, NULL,
1307 				     NULL, &new_direct_jump_p,
1308 				     last_combined_insn)) != 0)
1309 	      {
1310 		statistics_counter_event (cfun, "two-insn combine", 1);
1311 		goto retry;
1312 	      }
1313 
1314 	  /* Try each sequence of three linked insns ending with this one.  */
1315 
1316 	  if (max_combine >= 3)
1317 	    FOR_EACH_LOG_LINK (links, insn)
1318 	      {
1319 		rtx_insn *link = links->insn;
1320 
1321 		/* If the linked insn has been replaced by a note, then there
1322 		   is no point in pursuing this chain any further.  */
1323 		if (NOTE_P (link))
1324 		  continue;
1325 
1326 		FOR_EACH_LOG_LINK (nextlinks, link)
1327 		  if ((next = try_combine (insn, link, nextlinks->insn,
1328 					   NULL, &new_direct_jump_p,
1329 					   last_combined_insn)) != 0)
1330 		    {
1331 		      statistics_counter_event (cfun, "three-insn combine", 1);
1332 		      goto retry;
1333 		    }
1334 	      }
1335 
1336 	  /* Try to combine a jump insn that uses CC0
1337 	     with a preceding insn that sets CC0, and maybe with its
1338 	     logical predecessor as well.
1339 	     This is how we make decrement-and-branch insns.
1340 	     We need this special code because data flow connections
1341 	     via CC0 do not get entered in LOG_LINKS.  */
1342 
1343 	  if (HAVE_cc0
1344 	      && JUMP_P (insn)
1345 	      && (prev = prev_nonnote_insn (insn)) != 0
1346 	      && NONJUMP_INSN_P (prev)
1347 	      && sets_cc0_p (PATTERN (prev)))
1348 	    {
1349 	      if ((next = try_combine (insn, prev, NULL, NULL,
1350 				       &new_direct_jump_p,
1351 				       last_combined_insn)) != 0)
1352 		goto retry;
1353 
1354 	      FOR_EACH_LOG_LINK (nextlinks, prev)
1355 		  if ((next = try_combine (insn, prev, nextlinks->insn,
1356 					   NULL, &new_direct_jump_p,
1357 					   last_combined_insn)) != 0)
1358 		    goto retry;
1359 	    }
1360 
1361 	  /* Do the same for an insn that explicitly references CC0.  */
1362 	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1363 	      && (prev = prev_nonnote_insn (insn)) != 0
1364 	      && NONJUMP_INSN_P (prev)
1365 	      && sets_cc0_p (PATTERN (prev))
1366 	      && GET_CODE (PATTERN (insn)) == SET
1367 	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1368 	    {
1369 	      if ((next = try_combine (insn, prev, NULL, NULL,
1370 				       &new_direct_jump_p,
1371 				       last_combined_insn)) != 0)
1372 		goto retry;
1373 
1374 	      FOR_EACH_LOG_LINK (nextlinks, prev)
1375 		  if ((next = try_combine (insn, prev, nextlinks->insn,
1376 					   NULL, &new_direct_jump_p,
1377 					   last_combined_insn)) != 0)
1378 		    goto retry;
1379 	    }
1380 
1381 	  /* Finally, see if any of the insns that this insn links to
1382 	     explicitly references CC0.  If so, try this insn, that insn,
1383 	     and its predecessor if it sets CC0.  */
1384 	  if (HAVE_cc0)
1385 	    {
1386 	      FOR_EACH_LOG_LINK (links, insn)
1387 		if (NONJUMP_INSN_P (links->insn)
1388 		    && GET_CODE (PATTERN (links->insn)) == SET
1389 		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1390 		    && (prev = prev_nonnote_insn (links->insn)) != 0
1391 		    && NONJUMP_INSN_P (prev)
1392 		    && sets_cc0_p (PATTERN (prev))
1393 		    && (next = try_combine (insn, links->insn,
1394 					    prev, NULL, &new_direct_jump_p,
1395 					    last_combined_insn)) != 0)
1396 		  goto retry;
1397 	    }
1398 
1399 	  /* Try combining an insn with two different insns whose results it
1400 	     uses.  */
1401 	  if (max_combine >= 3)
1402 	    FOR_EACH_LOG_LINK (links, insn)
1403 	      for (nextlinks = links->next; nextlinks;
1404 		   nextlinks = nextlinks->next)
1405 		if ((next = try_combine (insn, links->insn,
1406 					 nextlinks->insn, NULL,
1407 					 &new_direct_jump_p,
1408 					 last_combined_insn)) != 0)
1409 
1410 		  {
1411 		    statistics_counter_event (cfun, "three-insn combine", 1);
1412 		    goto retry;
1413 		  }
1414 
1415 	  /* Try four-instruction combinations.  */
1416 	  if (max_combine >= 4)
1417 	    FOR_EACH_LOG_LINK (links, insn)
1418 	      {
1419 		struct insn_link *next1;
1420 		rtx_insn *link = links->insn;
1421 
1422 		/* If the linked insn has been replaced by a note, then there
1423 		   is no point in pursuing this chain any further.  */
1424 		if (NOTE_P (link))
1425 		  continue;
1426 
1427 		FOR_EACH_LOG_LINK (next1, link)
1428 		  {
1429 		    rtx_insn *link1 = next1->insn;
1430 		    if (NOTE_P (link1))
1431 		      continue;
1432 		    /* I0 -> I1 -> I2 -> I3.  */
1433 		    FOR_EACH_LOG_LINK (nextlinks, link1)
1434 		      if ((next = try_combine (insn, link, link1,
1435 					       nextlinks->insn,
1436 					       &new_direct_jump_p,
1437 					       last_combined_insn)) != 0)
1438 			{
1439 			  statistics_counter_event (cfun, "four-insn combine", 1);
1440 			  goto retry;
1441 			}
1442 		    /* I0, I1 -> I2, I2 -> I3.  */
1443 		    for (nextlinks = next1->next; nextlinks;
1444 			 nextlinks = nextlinks->next)
1445 		      if ((next = try_combine (insn, link, link1,
1446 					       nextlinks->insn,
1447 					       &new_direct_jump_p,
1448 					       last_combined_insn)) != 0)
1449 			{
1450 			  statistics_counter_event (cfun, "four-insn combine", 1);
1451 			  goto retry;
1452 			}
1453 		  }
1454 
1455 		for (next1 = links->next; next1; next1 = next1->next)
1456 		  {
1457 		    rtx_insn *link1 = next1->insn;
1458 		    if (NOTE_P (link1))
1459 		      continue;
1460 		    /* I0 -> I2; I1, I2 -> I3.  */
1461 		    FOR_EACH_LOG_LINK (nextlinks, link)
1462 		      if ((next = try_combine (insn, link, link1,
1463 					       nextlinks->insn,
1464 					       &new_direct_jump_p,
1465 					       last_combined_insn)) != 0)
1466 			{
1467 			  statistics_counter_event (cfun, "four-insn combine", 1);
1468 			  goto retry;
1469 			}
1470 		    /* I0 -> I1; I1, I2 -> I3.  */
1471 		    FOR_EACH_LOG_LINK (nextlinks, link1)
1472 		      if ((next = try_combine (insn, link, link1,
1473 					       nextlinks->insn,
1474 					       &new_direct_jump_p,
1475 					       last_combined_insn)) != 0)
1476 			{
1477 			  statistics_counter_event (cfun, "four-insn combine", 1);
1478 			  goto retry;
1479 			}
1480 		  }
1481 	      }
1482 
1483 	  /* Try this insn with each REG_EQUAL note it links back to.  */
1484 	  FOR_EACH_LOG_LINK (links, insn)
1485 	    {
1486 	      rtx set, note;
1487 	      rtx_insn *temp = links->insn;
1488 	      if ((set = single_set (temp)) != 0
1489 		  && (note = find_reg_equal_equiv_note (temp)) != 0
1490 		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1491 		  /* Avoid using a register that may already have been marked
1492 		     dead by an earlier instruction.  */
1493 		  && ! unmentioned_reg_p (note, SET_SRC (set))
1494 		  && (GET_MODE (note) == VOIDmode
1495 		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1496 		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1497 			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1498 			     || (GET_MODE (XEXP (SET_DEST (set), 0))
1499 				 == GET_MODE (note))))))
1500 		{
1501 		  /* Temporarily replace the set's source with the
1502 		     contents of the REG_EQUAL note.  The insn will
1503 		     be deleted or recognized by try_combine.  */
1504 		  rtx orig_src = SET_SRC (set);
1505 		  rtx orig_dest = SET_DEST (set);
1506 		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1507 		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
1508 		  SET_SRC (set) = note;
1509 		  i2mod = temp;
1510 		  i2mod_old_rhs = copy_rtx (orig_src);
1511 		  i2mod_new_rhs = copy_rtx (note);
1512 		  next = try_combine (insn, i2mod, NULL, NULL,
1513 				      &new_direct_jump_p,
1514 				      last_combined_insn);
1515 		  i2mod = NULL;
1516 		  if (next)
1517 		    {
1518 		      statistics_counter_event (cfun, "insn-with-note combine", 1);
1519 		      goto retry;
1520 		    }
1521 		  SET_SRC (set) = orig_src;
1522 		  SET_DEST (set) = orig_dest;
1523 		}
1524 	    }
1525 
1526 	  if (!NOTE_P (insn))
1527 	    record_dead_and_set_regs (insn);
1528 
1529 retry:
1530 	  ;
1531 	}
1532     }
1533 
1534   default_rtl_profile ();
1535   clear_bb_flags ();
1536   new_direct_jump_p |= purge_all_dead_edges ();
1537   new_direct_jump_p |= delete_noop_moves ();
1538 
1539   /* Clean up.  */
1540   obstack_free (&insn_link_obstack, NULL);
1541   free (uid_log_links);
1542   free (uid_insn_cost);
1543   reg_stat.release ();
1544 
1545   {
1546     struct undo *undo, *next;
1547     for (undo = undobuf.frees; undo; undo = next)
1548       {
1549 	next = undo->next;
1550 	free (undo);
1551       }
1552     undobuf.frees = 0;
1553   }
1554 
1555   total_attempts += combine_attempts;
1556   total_merges += combine_merges;
1557   total_extras += combine_extras;
1558   total_successes += combine_successes;
1559 
1560   nonzero_sign_valid = 0;
1561   rtl_hooks = general_rtl_hooks;
1562 
1563   /* Make recognizer allow volatile MEMs again.  */
1564   init_recog ();
1565 
1566   return new_direct_jump_p;
1567 }
1568 
1569 /* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */
1570 
1571 static void
1572 init_reg_last (void)
1573 {
1574   unsigned int i;
1575   reg_stat_type *p;
1576 
1577   FOR_EACH_VEC_ELT (reg_stat, i, p)
1578     memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1579 }
1580 
1581 /* Set up any promoted values for incoming argument registers.  */
1582 
1583 static void
1584 setup_incoming_promotions (rtx_insn *first)
1585 {
1586   tree arg;
1587   bool strictly_local = false;
1588 
1589   for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1590        arg = DECL_CHAIN (arg))
1591     {
1592       rtx x, reg = DECL_INCOMING_RTL (arg);
1593       int uns1, uns3;
1594       machine_mode mode1, mode2, mode3, mode4;
1595 
1596       /* Only continue if the incoming argument is in a register.  */
1597       if (!REG_P (reg))
1598 	continue;
1599 
1600       /* Determine, if possible, whether all call sites of the current
1601          function lie within the current compilation unit.  (This does
1602 	 take into account the exporting of a function via taking its
1603 	 address, and so forth.)  */
1604       strictly_local = cgraph_node::local_info (current_function_decl)->local;
1605 
1606       /* The mode and signedness of the argument before any promotions happen
1607          (equal to the mode of the pseudo holding it at that stage).  */
1608       mode1 = TYPE_MODE (TREE_TYPE (arg));
1609       uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1610 
1611       /* The mode and signedness of the argument after any source language and
1612          TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
1613       mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1614       uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1615 
1616       /* The mode and signedness of the argument as it is actually passed,
1617          see assign_parm_setup_reg in function.c.  */
1618       mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1619 				     TREE_TYPE (cfun->decl), 0);
1620 
1621       /* The mode of the register in which the argument is being passed.  */
1622       mode4 = GET_MODE (reg);
1623 
1624       /* Eliminate sign extensions in the callee when:
1625 	 (a) A mode promotion has occurred;  */
1626       if (mode1 == mode3)
1627 	continue;
1628       /* (b) The mode of the register is the same as the mode of
1629 	     the argument as it is passed; */
1630       if (mode3 != mode4)
1631 	continue;
1632       /* (c) There's no language level extension;  */
1633       if (mode1 == mode2)
1634 	;
1635       /* (c.1) All callers are from the current compilation unit.  If that's
1636 	 the case we don't have to rely on an ABI, we only have to know
1637 	 what we're generating right now, and we know that we will do the
1638 	 mode1 to mode2 promotion with the given sign.  */
1639       else if (!strictly_local)
1640 	continue;
1641       /* (c.2) The combination of the two promotions is useful.  This is
1642 	 true when the signs match, or if the first promotion is unsigned.
1643 	 In the latter case, (sign_extend (zero_extend x)) is the same as
1644 	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
1645       else if (uns1)
1646 	uns3 = true;
1647       else if (uns3)
1648 	continue;
1649 
1650       /* Record that the value was promoted from mode1 to mode3,
1651 	 so that any sign extension at the head of the current
1652 	 function may be eliminated.  */
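      /* As an illustrative sketch (the exact modes depend on the target
	 ABI): an argument promoted from QImode to SImode with zero
	 extension is recorded below as
	 (zero_extend:SI (clobber:QI (const_int 0))).  */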
1653       x = gen_rtx_CLOBBER (mode1, const0_rtx);
1654       x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1655       record_value_for_reg (reg, first, x);
1656     }
1657 }
1658 
1659 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1660    that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1661    because some machines (maybe most) will actually do the sign-extension and
1662    this is the conservative approach.
1663 
1664    ??? For 2.5, try to tighten up the MD files in this regard instead of this
1665    kludge.  */
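/* For instance (an illustrative example): with MODE == QImode and PREC == 32,
   (const_int 128) has the QImode sign bit set, so it is rewritten as
   (const_int -128), i.e. the original value with all bits above QImode set.  */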
1666 
1667 static rtx
1668 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1669 {
1670   scalar_int_mode int_mode;
1671   if (CONST_INT_P (src)
1672       && is_a <scalar_int_mode> (mode, &int_mode)
1673       && GET_MODE_PRECISION (int_mode) < prec
1674       && INTVAL (src) > 0
1675       && val_signbit_known_set_p (int_mode, INTVAL (src)))
1676     src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1677 
1678   return src;
1679 }
1680 
1681 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1682    and SET.  */
1683 
1684 static void
1685 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1686 			   rtx x)
1687 {
1688   rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1689   unsigned HOST_WIDE_INT bits = 0;
1690   rtx reg_equal = NULL, src = SET_SRC (set);
1691   unsigned int num = 0;
1692 
1693   if (reg_equal_note)
1694     reg_equal = XEXP (reg_equal_note, 0);
1695 
1696   if (SHORT_IMMEDIATES_SIGN_EXTEND)
1697     {
1698       src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1699       if (reg_equal)
1700 	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1701     }
1702 
1703   /* Don't call nonzero_bits if it cannot change anything.  */
1704   if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1705     {
1706       machine_mode mode = GET_MODE (x);
1707       if (GET_MODE_CLASS (mode) == MODE_INT
1708 	  && HWI_COMPUTABLE_MODE_P (mode))
1709 	mode = nonzero_bits_mode;
1710       bits = nonzero_bits (src, mode);
1711       if (reg_equal && bits)
1712 	bits &= nonzero_bits (reg_equal, mode);
1713       rsp->nonzero_bits |= bits;
1714     }
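  /* For example (purely illustrative): if nonzero_bits of SET_SRC is 0xff
     but nonzero_bits of the REG_EQUAL value is 0x0f, only 0x0f is
     accumulated into rsp->nonzero_bits above.  */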
1715 
1716   /* Don't call num_sign_bit_copies if it cannot change anything.  */
1717   if (rsp->sign_bit_copies != 1)
1718     {
1719       num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1720       if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
1721 	{
1722 	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1723 	  if (num == 0 || numeq > num)
1724 	    num = numeq;
1725 	}
1726       if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1727 	rsp->sign_bit_copies = num;
1728     }
1729 }
1730 
1731 /* Called via note_stores.  If X is a pseudo that is narrower than
1732    HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1733 
1734    If we are setting only a portion of X and we can't figure out what
1735    portion, assume all bits will be used since we don't know what will
1736    be happening.
1737 
1738    Similarly, set how many bits of X are known to be copies of the sign bit
1739    at all locations in the function.  This is the smallest number implied
1740    by any set of X.  */
1741 
1742 static void
1743 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1744 {
1745   rtx_insn *insn = (rtx_insn *) data;
1746   scalar_int_mode mode;
1747 
1748   if (REG_P (x)
1749       && REGNO (x) >= FIRST_PSEUDO_REGISTER
1750       /* If this register is undefined at the start of the function, we can't
1751 	 say what its contents were.  */
1752       && ! REGNO_REG_SET_P
1753 	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1754       && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1755       && HWI_COMPUTABLE_MODE_P (mode))
1756     {
1757       reg_stat_type *rsp = &reg_stat[REGNO (x)];
1758 
1759       if (set == 0 || GET_CODE (set) == CLOBBER)
1760 	{
1761 	  rsp->nonzero_bits = GET_MODE_MASK (mode);
1762 	  rsp->sign_bit_copies = 1;
1763 	  return;
1764 	}
1765 
1766       /* Should not happen as we are only using pseudo registers.  */
1767       gcc_assert (GET_CODE (set) != CLOBBER_HIGH);
1768 
1769       /* If this register is being initialized using itself, and the
1770 	 register is uninitialized in this basic block, and there are
1771 	 no LOG_LINKS which set the register, then part of the
1772 	 register is uninitialized.  In that case we can't assume
1773 	 anything about the number of nonzero bits.
1774 
1775 	 ??? We could do better if we checked this in
1776 	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
1777 	 could avoid making assumptions about the insn which initially
1778 	 sets the register, while still using the information in other
1779 	 insns.  We would have to be careful to check every insn
1780 	 involved in the combination.  */
1781 
1782       if (insn
1783 	  && reg_referenced_p (x, PATTERN (insn))
1784 	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1785 			       REGNO (x)))
1786 	{
1787 	  struct insn_link *link;
1788 
1789 	  FOR_EACH_LOG_LINK (link, insn)
1790 	    if (dead_or_set_p (link->insn, x))
1791 	      break;
1792 	  if (!link)
1793 	    {
1794 	      rsp->nonzero_bits = GET_MODE_MASK (mode);
1795 	      rsp->sign_bit_copies = 1;
1796 	      return;
1797 	    }
1798 	}
1799 
1800       /* If this is a complex assignment, see if we can convert it into a
1801 	 simple assignment.  */
1802       set = expand_field_assignment (set);
1803 
1804       /* If this is a simple assignment, or we have a paradoxical SUBREG,
1805 	 set what we know about X.  */
1806 
1807       if (SET_DEST (set) == x
1808 	  || (paradoxical_subreg_p (SET_DEST (set))
1809 	      && SUBREG_REG (SET_DEST (set)) == x))
1810 	update_rsp_from_reg_equal (rsp, insn, set, x);
1811       else
1812 	{
1813 	  rsp->nonzero_bits = GET_MODE_MASK (mode);
1814 	  rsp->sign_bit_copies = 1;
1815 	}
1816     }
1817 }
1818 
1819 /* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
1820    optionally insns that were previously combined into I3 or that will be
1821    combined into the merger of INSN and I3.  The order is PRED, PRED2,
1822    INSN, SUCC, SUCC2, I3.
1823 
1824    Return 0 if the combination is not allowed for any reason.
1825 
1826    If the combination is allowed, *PDEST will be set to the single
1827    destination of INSN and *PSRC to the single source, and this function
1828    will return 1.  */
1829 
1830 static int
1831 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1832 	       rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1833 	       rtx *pdest, rtx *psrc)
1834 {
1835   int i;
1836   const_rtx set = 0;
1837   rtx src, dest;
1838   rtx_insn *p;
1839   rtx link;
1840   bool all_adjacent = true;
1841   int (*is_volatile_p) (const_rtx);
1842 
1843   if (succ)
1844     {
1845       if (succ2)
1846 	{
1847 	  if (next_active_insn (succ2) != i3)
1848 	    all_adjacent = false;
1849 	  if (next_active_insn (succ) != succ2)
1850 	    all_adjacent = false;
1851 	}
1852       else if (next_active_insn (succ) != i3)
1853 	all_adjacent = false;
1854       if (next_active_insn (insn) != succ)
1855 	all_adjacent = false;
1856     }
1857   else if (next_active_insn (insn) != i3)
1858     all_adjacent = false;
1859 
1860   /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1861      or a PARALLEL consisting of such a SET and CLOBBERs.
1862 
1863      If INSN has CLOBBER parallel parts, ignore them for our processing.
1864      By definition, these happen during the execution of the insn.  When it
1865      is merged with another insn, all bets are off.  If they are, in fact,
1866      needed and aren't also supplied in I3, they may be added by
1867      recog_for_combine.  Otherwise, it won't match.
1868 
1869      We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1870      note.
1871 
1872      Get the source and destination of INSN.  If more than one, can't
1873      combine.  */
1874 
1875   if (GET_CODE (PATTERN (insn)) == SET)
1876     set = PATTERN (insn);
1877   else if (GET_CODE (PATTERN (insn)) == PARALLEL
1878 	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1879     {
1880       for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1881 	{
1882 	  rtx elt = XVECEXP (PATTERN (insn), 0, i);
1883 
1884 	  switch (GET_CODE (elt))
1885 	    {
1886 	    /* This is important to combine floating point insns
1887 	       for the SH4 port.  */
1888 	    case USE:
1889 	      /* Combining an isolated USE doesn't make sense.
1890 		 We depend here on combinable_i3pat to reject them.  */
1891 	      /* The code below this loop only verifies that the inputs of
1892 		 the SET in INSN do not change.  We call reg_set_between_p
1893 		 to verify that the REG in the USE does not change between
1894 		 I3 and INSN.
1895 		 If the USE in INSN was for a pseudo register, the matching
1896 		 insn pattern will likely match any register; combining this
1897 		 with any other USE would only be safe if we knew that the
1898 		 used registers have identical values, or if there was
1899 		 something to tell them apart, e.g. different modes.  For
1900 		 now, we forgo such complicated tests and simply disallow
1901 		 combining of USES of pseudo registers with any other USE.  */
1902 	      if (REG_P (XEXP (elt, 0))
1903 		  && GET_CODE (PATTERN (i3)) == PARALLEL)
1904 		{
1905 		  rtx i3pat = PATTERN (i3);
1906 		  int i = XVECLEN (i3pat, 0) - 1;
1907 		  unsigned int regno = REGNO (XEXP (elt, 0));
1908 
1909 		  do
1910 		    {
1911 		      rtx i3elt = XVECEXP (i3pat, 0, i);
1912 
1913 		      if (GET_CODE (i3elt) == USE
1914 			  && REG_P (XEXP (i3elt, 0))
1915 			  && (REGNO (XEXP (i3elt, 0)) == regno
1916 			      ? reg_set_between_p (XEXP (elt, 0),
1917 						   PREV_INSN (insn), i3)
1918 			      : regno >= FIRST_PSEUDO_REGISTER))
1919 			return 0;
1920 		    }
1921 		  while (--i >= 0);
1922 		}
1923 	      break;
1924 
1925 	      /* We can ignore CLOBBERs.  */
1926 	    case CLOBBER:
1927 	    case CLOBBER_HIGH:
1928 	      break;
1929 
1930 	    case SET:
1931 	      /* Ignore SETs whose result isn't used but not those that
1932 		 have side-effects.  */
1933 	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1934 		  && insn_nothrow_p (insn)
1935 		  && !side_effects_p (elt))
1936 		break;
1937 
1938 	      /* If we have already found a SET, this is a second one and
1939 		 so we cannot combine with this insn.  */
1940 	      if (set)
1941 		return 0;
1942 
1943 	      set = elt;
1944 	      break;
1945 
1946 	    default:
1947 	      /* Anything else means we can't combine.  */
1948 	      return 0;
1949 	    }
1950 	}
1951 
1952       if (set == 0
1953 	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1954 	     so don't do anything with it.  */
1955 	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1956 	return 0;
1957     }
1958   else
1959     return 0;
1960 
1961   if (set == 0)
1962     return 0;
1963 
1964   /* The simplification in expand_field_assignment may call back to
1965      get_last_value, so set safe guard here.  */
1966   subst_low_luid = DF_INSN_LUID (insn);
1967 
1968   set = expand_field_assignment (set);
1969   src = SET_SRC (set), dest = SET_DEST (set);
1970 
1971   /* Do not eliminate a user-specified register if it is in an
1972      asm input, because we may break the register asm usage defined
1973      in the GCC manual if we allow doing so.
1974      Be aware that this may cover more cases than we expect, but this
1975      should be harmless.  */
1976   if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1977       && extract_asm_operands (PATTERN (i3)))
1978     return 0;
1979 
1980   /* Don't eliminate a store in the stack pointer.  */
1981   if (dest == stack_pointer_rtx
1982       /* Don't combine with an insn that sets a register to itself if it has
1983 	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
1984       || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1985       /* Can't merge an ASM_OPERANDS.  */
1986       || GET_CODE (src) == ASM_OPERANDS
1987       /* Can't merge a function call.  */
1988       || GET_CODE (src) == CALL
1989       /* Don't eliminate a function call argument.  */
1990       || (CALL_P (i3)
1991 	  && (find_reg_fusage (i3, USE, dest)
1992 	      || (REG_P (dest)
1993 		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
1994 		  && global_regs[REGNO (dest)])))
1995       /* Don't substitute into an incremented register.  */
1996       || FIND_REG_INC_NOTE (i3, dest)
1997       || (succ && FIND_REG_INC_NOTE (succ, dest))
1998       || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1999       /* Don't substitute into a non-local goto; this confuses the CFG.  */
2000       || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
2001       /* Make sure that DEST is not used after INSN but before SUCC, or
2002 	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
2003       || (!all_adjacent
2004 	  && ((succ2
2005 	       && (reg_used_between_p (dest, succ2, i3)
2006 		   || reg_used_between_p (dest, succ, succ2)))
2007 	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
2008 	      || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
2009 	      || (succ
2010 		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
2011 		     that case SUCC is not in the insn stream, so use SUCC2
2012 		     instead for this test.  */
2013 		  && reg_used_between_p (dest, insn,
2014 					 succ2
2015 					 && INSN_UID (succ) == INSN_UID (succ2)
2016 					 ? succ2 : succ))))
2017       /* Make sure that the value that is to be substituted for the register
2018 	 does not use any registers whose values alter in between.  However,
2019 	 if the insns are adjacent, a use can't cross a set even though we
2020 	 think it might (this can happen for a sequence of insns each setting
2021 	 the same destination; last_set of that register might point to
2022 	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
2023 	 equivalent to the memory so the substitution is valid even if there
2024 	 are intervening stores.  Also, don't move a volatile asm or
2025 	 UNSPEC_VOLATILE across any other insns.  */
2026       || (! all_adjacent
2027 	  && (((!MEM_P (src)
2028 		|| ! find_reg_note (insn, REG_EQUIV, src))
2029 	       && modified_between_p (src, insn, i3))
2030 	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2031 	      || GET_CODE (src) == UNSPEC_VOLATILE))
2032       /* Don't combine across a CALL_INSN, because that would possibly
2033 	 change whether the life span of some REGs crosses calls or not,
2034 	 and it is a pain to update that information.
2035 	 Exception: if source is a constant, moving it later can't hurt.
2036 	 Accept that as a special case.  */
2037       || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2038     return 0;
2039 
2040   /* DEST must either be a REG or CC0.  */
2041   if (REG_P (dest))
2042     {
2043       /* If register alignment is being enforced for multi-word items in all
2044 	 cases except for parameters, it is possible to have a register copy
2045 	 insn referencing a hard register that is not allowed to contain the
2046 	 mode being copied and which would not be valid as an operand of most
2047 	 insns.  Eliminate this problem by not combining with such an insn.
2048 
2049 	 Also, on some machines we don't want to extend the life of a hard
2050 	 register.  */
2051 
2052       if (REG_P (src)
2053 	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2054 	       && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2055 	      /* Don't extend the life of a hard register unless it is
2056 		 a user variable (if we have few registers) or it can't
2057 		 fit into the desired register (meaning something special
2058 		 is going on).
2059 		 Also avoid substituting a return register into I3, because
2060 		 reload can't handle a conflict with constraints of other
2061 		 inputs.  */
2062 	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
2063 		  && !targetm.hard_regno_mode_ok (REGNO (src),
2064 						  GET_MODE (src)))))
2065 	return 0;
2066     }
2067   else if (GET_CODE (dest) != CC0)
2068     return 0;
2069 
2070 
2071   if (GET_CODE (PATTERN (i3)) == PARALLEL)
2072     for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2073       if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2074 	{
2075 	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2076 
2077 	  /* If the clobber represents an earlyclobber operand, we must not
2078 	     substitute an expression containing the clobbered register.
2079 	     As we do not analyze the constraint strings here, we have to
2080 	     make the conservative assumption.  However, if the register is
2081 	     a fixed hard reg, the clobber cannot represent any operand;
2082 	     we leave it up to the machine description to either accept or
2083 	     reject use-and-clobber patterns.  */
2084 	  if (!REG_P (reg)
2085 	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2086 	      || !fixed_regs[REGNO (reg)])
2087 	    if (reg_overlap_mentioned_p (reg, src))
2088 	      return 0;
2089 	}
2090 
2091   /* If INSN contains anything volatile, or is an `asm' (whether volatile
2092      or not), reject, unless nothing volatile comes between it and I3.  */
2093 
2094   if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2095     {
2096       /* Make sure neither succ nor succ2 contains a volatile reference.  */
2097       if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2098 	return 0;
2099       if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2100 	return 0;
2101       /* We'll check insns between INSN and I3 below.  */
2102     }
2103 
2104   /* If INSN is an asm, and DEST is a hard register, reject, since it has
2105      to be an explicit register variable, and was chosen for a reason.  */
2106 
2107   if (GET_CODE (src) == ASM_OPERANDS
2108       && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2109     return 0;
2110 
2111   /* If INSN contains volatile references (specifically volatile MEMs),
2112      we cannot combine across any other volatile references.
2113      Even if INSN doesn't contain volatile references, any intervening
2114      volatile insn might affect machine state.  */
2115 
2116   is_volatile_p = volatile_refs_p (PATTERN (insn))
2117     ? volatile_refs_p
2118     : volatile_insn_p;
2119 
2120   for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2121     if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2122       return 0;
2123 
2124   /* If INSN contains an autoincrement or autodecrement, make sure that
2125      register is not used between there and I3, and not already used in
2126      I3 either.  Neither must it be used in PRED or SUCC, if they exist.
2127      Also insist that I3 not be a jump; if it were one
2128      and the incremented register were spilled, we would lose.  */
2129 
2130   if (AUTO_INC_DEC)
2131     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2132       if (REG_NOTE_KIND (link) == REG_INC
2133 	  && (JUMP_P (i3)
2134 	      || reg_used_between_p (XEXP (link, 0), insn, i3)
2135 	      || (pred != NULL_RTX
2136 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2137 	      || (pred2 != NULL_RTX
2138 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2139 	      || (succ != NULL_RTX
2140 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2141 	      || (succ2 != NULL_RTX
2142 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2143 	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2144 	return 0;
2145 
2146   /* Don't combine an insn that follows a CC0-setting insn.
2147      An insn that uses CC0 must not be separated from the one that sets it.
2148      We do, however, allow I2 to follow a CC0-setting insn if that insn
2149      is passed as I1; in that case it will be deleted also.
2150      We also allow combining in this case if all the insns are adjacent
2151      because that would leave the two CC0 insns adjacent as well.
2152      It would be more logical to test whether CC0 occurs inside I1 or I2,
2153      but that would be much slower, and this ought to be equivalent.  */
2154 
2155   if (HAVE_cc0)
2156     {
2157       p = prev_nonnote_insn (insn);
2158       if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2159 	  && ! all_adjacent)
2160 	return 0;
2161     }
2162 
2163   /* If we get here, we have passed all the tests and the combination is
2164      to be allowed.  */
2165 
2166   *pdest = dest;
2167   *psrc = src;
2168 
2169   return 1;
2170 }
2171 
2172 /* LOC is the location within I3 that contains its pattern or the component
2173    of a PARALLEL of the pattern.  We validate that it is valid for combining.
2174 
2175    One problem is that if I3 modifies its output, as opposed to replacing it
2176    entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
2177    doing so would produce an insn that is not equivalent to the original insns.
2178 
2179    Consider:
2180 
2181 	 (set (reg:DI 101) (reg:DI 100))
2182 	 (set (subreg:SI (reg:DI 101) 0) <foo>)
2183 
2184    This is NOT equivalent to:
2185 
2186 	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2187 		    (set (reg:DI 101) (reg:DI 100))])
2188 
2189    Not only does this modify 100 (in which case it might still be valid
2190    if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2191 
2192    We can also run into a problem if I2 sets a register that I1
2193    uses and I1 gets directly substituted into I3 (not via I2).  In that
2194    case, we would be getting the wrong value of I2DEST into I3, so we
2195    must reject the combination.  This case occurs when I2 and I1 both
2196    feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2197    If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2198    of a SET must prevent combination from occurring.  The same situation
2199    can occur for I0, in which case I0_NOT_IN_SRC is set.
2200 
2201    Before doing the above check, we first try to expand a field assignment
2202    into a set of logical operations.
2203 
2204    If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2205    we place a register that is both set and used within I3.  If more than one
2206    such register is detected, we fail.
2207 
2208    Return 1 if the combination is valid, zero otherwise.  */
2209 
2210 static int
2211 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2212 		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2213 {
2214   rtx x = *loc;
2215 
2216   if (GET_CODE (x) == SET)
2217     {
2218       rtx set = x;
2219       rtx dest = SET_DEST (set);
2220       rtx src = SET_SRC (set);
2221       rtx inner_dest = dest;
2222       rtx subdest;
2223 
2224       while (GET_CODE (inner_dest) == STRICT_LOW_PART
2225 	     || GET_CODE (inner_dest) == SUBREG
2226 	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
2227 	inner_dest = XEXP (inner_dest, 0);
2228 
2229       /* Check for the case where I3 modifies its output, as discussed
2230 	 above.  We don't want to prevent pseudos from being combined
2231 	 into the address of a MEM, so only prevent the combination if
2232 	 i1 or i2 set the same MEM.  */
2233       if ((inner_dest != dest &&
2234 	   (!MEM_P (inner_dest)
2235 	    || rtx_equal_p (i2dest, inner_dest)
2236 	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
2237 	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2238 	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
2239 	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2240 	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2241 
2242 	  /* This is the same test done in can_combine_p except we can't test
2243 	     all_adjacent; we don't have to, since this instruction will stay
2244 	     in place, thus we are not considering increasing the lifetime of
2245 	     INNER_DEST.
2246 
2247 	     Also, if this insn sets a function argument, combining it with
2248 	     something that might need a spill could clobber a previous
2249 	     function argument; the all_adjacent test in can_combine_p also
2250 	     checks this; here, we do a more specific test for this case.  */
2251 
2252 	  || (REG_P (inner_dest)
2253 	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2254 	      && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2255 					      GET_MODE (inner_dest)))
2256 	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2257 	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2258 	return 0;
2259 
2260       /* If DEST is used in I3, it is being killed in this insn, so
2261 	 record that for later.  We have to consider paradoxical
2262 	 subregs here, since they kill the whole register, but we
2263 	 ignore partial subregs, STRICT_LOW_PART, etc.
2264 	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2265 	 STACK_POINTER_REGNUM, since these are always considered to be
2266 	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
2267       subdest = dest;
2268       if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2269 	subdest = SUBREG_REG (subdest);
2270       if (pi3dest_killed
2271 	  && REG_P (subdest)
2272 	  && reg_referenced_p (subdest, PATTERN (i3))
2273 	  && REGNO (subdest) != FRAME_POINTER_REGNUM
2274 	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2275 	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2276 	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2277 	      || (REGNO (subdest) != ARG_POINTER_REGNUM
2278 		  || ! fixed_regs [REGNO (subdest)]))
2279 	  && REGNO (subdest) != STACK_POINTER_REGNUM)
2280 	{
2281 	  if (*pi3dest_killed)
2282 	    return 0;
2283 
2284 	  *pi3dest_killed = subdest;
2285 	}
2286     }
2287 
2288   else if (GET_CODE (x) == PARALLEL)
2289     {
2290       int i;
2291 
2292       for (i = 0; i < XVECLEN (x, 0); i++)
2293 	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2294 				i1_not_in_src, i0_not_in_src, pi3dest_killed))
2295 	  return 0;
2296     }
2297 
2298   return 1;
2299 }
2300 
2301 /* Return 1 if X is an arithmetic expression that contains a multiplication
2302    or a division.  We don't count multiplications by powers of two here.  */
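/* For instance (illustrative): (plus (mult X (const_int 4)) Y) does not
   count because 4 is a power of two, whereas (plus (mult X (const_int 3)) Y)
   and anything containing a DIV, UDIV, MOD or UMOD does.  */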
2303 
2304 static int
2305 contains_muldiv (rtx x)
2306 {
2307   switch (GET_CODE (x))
2308     {
2309     case MOD:  case DIV:  case UMOD:  case UDIV:
2310       return 1;
2311 
2312     case MULT:
2313       return ! (CONST_INT_P (XEXP (x, 1))
2314 		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
2315     default:
2316       if (BINARY_P (x))
2317 	return contains_muldiv (XEXP (x, 0))
2318 	    || contains_muldiv (XEXP (x, 1));
2319 
2320       if (UNARY_P (x))
2321 	return contains_muldiv (XEXP (x, 0));
2322 
2323       return 0;
2324     }
2325 }
2326 
2327 /* Determine whether INSN can be used in a combination.  Return nonzero if
2328    not.  This is used in try_combine to detect early some cases where we
2329    can't perform combinations.  */
2330 
2331 static int
2332 cant_combine_insn_p (rtx_insn *insn)
2333 {
2334   rtx set;
2335   rtx src, dest;
2336 
2337   /* If this isn't really an insn, we can't do anything.
2338      This can occur when flow deletes an insn that it has merged into an
2339      auto-increment address.  */
2340   if (!NONDEBUG_INSN_P (insn))
2341     return 1;
2342 
2343   /* Never combine loads and stores involving hard regs that are likely
2344      to be spilled.  The register allocator can usually handle such
2345      reg-reg moves by tying.  If we allow the combiner to make
2346      substitutions of likely-spilled regs, reload might die.
2347      As an exception, we allow combinations involving fixed regs; these are
2348      not available to the register allocator so there's no risk involved.  */
2349 
2350   set = single_set (insn);
2351   if (! set)
2352     return 0;
2353   src = SET_SRC (set);
2354   dest = SET_DEST (set);
2355   if (GET_CODE (src) == SUBREG)
2356     src = SUBREG_REG (src);
2357   if (GET_CODE (dest) == SUBREG)
2358     dest = SUBREG_REG (dest);
2359   if (REG_P (src) && REG_P (dest)
2360       && ((HARD_REGISTER_P (src)
2361 	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2362 #ifdef LEAF_REGISTERS
2363 	   && ! LEAF_REGISTERS [REGNO (src)])
2364 #else
2365 	   )
2366 #endif
2367 	  || (HARD_REGISTER_P (dest)
2368 	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2369 	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2370     return 1;
2371 
2372   return 0;
2373 }
2374 
2375 struct likely_spilled_retval_info
2376 {
2377   unsigned regno, nregs;
2378   unsigned mask;
2379 };
2380 
2381 /* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
2382    hard registers that are known to be written to / clobbered in full.  */
2383 static void
2384 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2385 {
2386   struct likely_spilled_retval_info *const info =
2387     (struct likely_spilled_retval_info *) data;
2388   unsigned regno, nregs;
2389   unsigned new_mask;
2390 
2391   if (!REG_P (XEXP (set, 0)))
2392     return;
2393   regno = REGNO (x);
2394   if (regno >= info->regno + info->nregs)
2395     return;
2396   nregs = REG_NREGS (x);
2397   if (regno + nregs <= info->regno)
2398     return;
2399   new_mask = (2U << (nregs - 1)) - 1;
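  /* At this point new_mask has the low NREGS bits set (e.g. 0x7 when
     NREGS == 3, an illustrative value); the shifts below align it with
     the bit for INFO->regno before clearing it from INFO->mask.  */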
2400   if (regno < info->regno)
2401     new_mask >>= info->regno - regno;
2402   else
2403     new_mask <<= regno - info->regno;
2404   info->mask &= ~new_mask;
2405 }
2406 
2407 /* Return nonzero iff part of the return value is live during INSN, and
2408    it is likely spilled.  This can happen when more than one insn is needed
2409    to copy the return value, e.g. when we consider combining into the
2410    second copy insn for a complex value.  */
2411 
2412 static int
2413 likely_spilled_retval_p (rtx_insn *insn)
2414 {
2415   rtx_insn *use = BB_END (this_basic_block);
2416   rtx reg;
2417   rtx_insn *p;
2418   unsigned regno, nregs;
2419   /* We assume here that no machine mode needs more than
2420      32 hard registers when the value overlaps with a register
2421      for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
2422   unsigned mask;
2423   struct likely_spilled_retval_info info;
2424 
2425   if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2426     return 0;
2427   reg = XEXP (PATTERN (use), 0);
2428   if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2429     return 0;
2430   regno = REGNO (reg);
2431   nregs = REG_NREGS (reg);
2432   if (nregs == 1)
2433     return 0;
2434   mask = (2U << (nregs - 1)) - 1;
2435 
2436   /* Disregard parts of the return value that are set later.  */
2437   info.regno = regno;
2438   info.nregs = nregs;
2439   info.mask = mask;
2440   for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2441     if (INSN_P (p))
2442       note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2443   mask = info.mask;
2444 
2445   /* Check if any of the (probably) live return value registers is
2446      likely spilled.  */
2447   nregs--;
2448   do
2449     {
2450       if ((mask & 1 << nregs)
2451 	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2452 	return 1;
2453     } while (nregs--);
2454   return 0;
2455 }
2456 
2457 /* Adjust INSN after we made a change to its destination.
2458 
2459    Changing the destination can invalidate notes that say something about
2460    the results of the insn and a LOG_LINK pointing to the insn.  */
2461 
2462 static void
2463 adjust_for_new_dest (rtx_insn *insn)
2464 {
2465   /* For notes, be conservative and simply remove them.  */
2466   remove_reg_equal_equiv_notes (insn);
2467 
2468   /* The new insn will have a destination that was previously the destination
2469      of an insn just above it.  Call distribute_links to make a LOG_LINK from
2470      the next use of that destination.  */
2471 
2472   rtx set = single_set (insn);
2473   gcc_assert (set);
2474 
2475   rtx reg = SET_DEST (set);
2476 
2477   while (GET_CODE (reg) == ZERO_EXTRACT
2478 	 || GET_CODE (reg) == STRICT_LOW_PART
2479 	 || GET_CODE (reg) == SUBREG)
2480     reg = XEXP (reg, 0);
2481   gcc_assert (REG_P (reg));
2482 
2483   distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2484 
2485   df_insn_rescan (insn);
2486 }
2487 
2488 /* Return TRUE if combine can reuse reg X in mode MODE.
2489    ADDED_SETS is nonzero if the original set is still required.  */
2490 static bool
2491 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2492 {
2493   unsigned int regno;
2494 
2495   if (!REG_P (x))
2496     return false;
2497 
2498   /* Don't change between modes with different underlying register sizes,
2499      since this could lead to invalid subregs.  */
2500   if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2501 		REGMODE_NATURAL_SIZE (GET_MODE (x))))
2502     return false;
2503 
2504   regno = REGNO (x);
2505   /* Allow hard registers if the new mode is legal, and occupies no more
2506      registers than the old mode.  */
2507   if (regno < FIRST_PSEUDO_REGISTER)
2508     return (targetm.hard_regno_mode_ok (regno, mode)
2509 	    && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2510 
2511   /* Or a pseudo that is only used once.  */
2512   return (regno < reg_n_sets_max
2513 	  && REG_N_SETS (regno) == 1
2514 	  && !added_sets
2515 	  && !REG_USERVAR_P (x));
2516 }
2517 
2518 
2519 /* Check whether X, the destination of a set, refers to part of
2520    the register specified by REG.  */
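/* For example (illustrative): (subreg:HI (reg:SI 100) 0) and
   (strict_low_part (subreg:HI (reg:SI 100) 0)) both refer to part of
   (reg:SI 100), while (reg:SI 101) does not.  */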
2521 
2522 static bool
2523 reg_subword_p (rtx x, rtx reg)
2524 {
2525   /* Check that reg is an integer mode register.  */
2526   if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2527     return false;
2528 
2529   if (GET_CODE (x) == STRICT_LOW_PART
2530       || GET_CODE (x) == ZERO_EXTRACT)
2531     x = XEXP (x, 0);
2532 
2533   return GET_CODE (x) == SUBREG
2534 	 && SUBREG_REG (x) == reg
2535 	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2536 }
2537 
2538 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2539    Note that the INSN should be deleted *after* removing dead edges, so
2540    that the kept edge is the fallthrough edge for a (set (pc) (pc))
2541    but not for a (set (pc) (label_ref FOO)).  */
2542 
2543 static void
2544 update_cfg_for_uncondjump (rtx_insn *insn)
2545 {
2546   basic_block bb = BLOCK_FOR_INSN (insn);
2547   gcc_assert (BB_END (bb) == insn);
2548 
2549   purge_dead_edges (bb);
2550 
2551   delete_insn (insn);
2552   if (EDGE_COUNT (bb->succs) == 1)
2553     {
2554       rtx_insn *insn;
2555 
2556       single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2557 
2558       /* Remove barriers from the footer if there are any.  */
2559       for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2560 	if (BARRIER_P (insn))
2561 	  {
2562 	    if (PREV_INSN (insn))
2563 	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2564 	    else
2565 	      BB_FOOTER (bb) = NEXT_INSN (insn);
2566 	    if (NEXT_INSN (insn))
2567 	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2568 	  }
2569 	else if (LABEL_P (insn))
2570 	  break;
2571     }
2572 }
2573 
2574 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2575    by an arbitrary number of CLOBBERs.  */
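/* For example (illustrative), (parallel [(set (reg A) X) (set (reg B) Y)
   (clobber (reg C))]) satisfies this for N == 2.  */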
2576 static bool
2577 is_parallel_of_n_reg_sets (rtx pat, int n)
2578 {
2579   if (GET_CODE (pat) != PARALLEL)
2580     return false;
2581 
2582   int len = XVECLEN (pat, 0);
2583   if (len < n)
2584     return false;
2585 
2586   int i;
2587   for (i = 0; i < n; i++)
2588     if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2589 	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2590       return false;
2591   for ( ; i < len; i++)
2592     switch (GET_CODE (XVECEXP (pat, 0, i)))
2593       {
2594       case CLOBBER:
2595 	if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2596 	  return false;
2597 	break;
2598       case CLOBBER_HIGH:
2599 	break;
2600       default:
2601 	return false;
2602       }
2603   return true;
2604 }
2605 
2606 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2607    CLOBBERs), can be split into individual SETs in that order, without
2608    changing semantics.  */
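/* For instance (illustrative), (parallel [(set A B) (set C A)]) cannot be
   split in order: the second SET reads A, which the first SET overwrites.  */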
2609 static bool
2610 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2611 {
2612   if (!insn_nothrow_p (insn))
2613     return false;
2614 
2615   rtx pat = PATTERN (insn);
2616 
2617   int i, j;
2618   for (i = 0; i < n; i++)
2619     {
2620       if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2621 	return false;
2622 
2623       rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2624 
2625       for (j = i + 1; j < n; j++)
2626 	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2627 	  return false;
2628     }
2629 
2630   return true;
2631 }
2632 
2633 /* Return whether X is just a single set, with the source
2634    a general_operand.  */
2635 static bool
2636 is_just_move (rtx x)
2637 {
2638   if (INSN_P (x))
2639     x = PATTERN (x);
2640 
2641   return (GET_CODE (x) == SET && general_operand (SET_SRC (x), VOIDmode));
2642 }
2643 
2644 /* Callback function to count autoincs.  */
2645 
2646 static int
2647 count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
2648 {
2649   (*((int *) arg))++;
2650 
2651   return 0;
2652 }
2653 
2654 /* Try to combine the insns I0, I1 and I2 into I3.
2655    Here I0, I1 and I2 appear earlier than I3.
2656    I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2657    I3.
2658 
2659    If we are combining more than two insns and the resulting insn is not
2660    recognized, try splitting it into two insns.  If that happens, I2 and I3
2661    are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2662    Otherwise, I0, I1 and I2 are pseudo-deleted.
2663 
2664    Return 0 if the combination does not work.  Then nothing is changed.
2665    If we did the combination, return the insn at which combine should
2666    resume scanning.
2667 
2668    Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2669    new direct jump instruction.
2670 
2671    LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2672    been I3 passed to an earlier try_combine within the same basic
2673    block.  */
2674 
2675 static rtx_insn *
2676 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2677 	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
2678 {
2679   /* New patterns for I3 and I2, respectively.  */
2680   rtx newpat, newi2pat = 0;
2681   rtvec newpat_vec_with_clobbers = 0;
2682   int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2683   /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2684      dead.  */
2685   int added_sets_0, added_sets_1, added_sets_2;
2686   /* Total number of SETs to put into I3.  */
2687   int total_sets;
2688   /* Nonzero if I2's or I1's body now appears in I3.  */
2689   int i2_is_used = 0, i1_is_used = 0;
2690   /* INSN_CODEs for new I3, new I2, and user of condition code.  */
2691   int insn_code_number, i2_code_number = 0, other_code_number = 0;
2692   /* Contains I3 if the destination of I3 is used in its source, which means
2693      that the old life of I3 is being killed.  If that usage is placed into
2694      I2 and not in I3, a REG_DEAD note must be made.  */
2695   rtx i3dest_killed = 0;
2696   /* SET_DEST and SET_SRC of I2, I1 and I0.  */
2697   rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2698   /* Copy of SET_SRC of I1 and I0, if needed.  */
2699   rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2700   /* Set if I2DEST was reused as a scratch register.  */
2701   bool i2scratch = false;
2702   /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
2703   rtx i0pat = 0, i1pat = 0, i2pat = 0;
2704   /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
2705   int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2706   int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2707   int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2708   int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2709   /* Notes that must be added to REG_NOTES in I3 and I2.  */
2710   rtx new_i3_notes, new_i2_notes;
2711   /* Notes that we substituted I3 into I2 instead of the normal case.  */
2712   int i3_subst_into_i2 = 0;
2713   /* Notes that I1, I2 or I3 is a MULT operation.  */
2714   int have_mult = 0;
2715   int swap_i2i3 = 0;
2716   int split_i2i3 = 0;
2717   int changed_i3_dest = 0;
2718   bool i2_was_move = false, i3_was_move = false;
2719   int n_auto_inc = 0;
2720 
2721   int maxreg;
2722   rtx_insn *temp_insn;
2723   rtx temp_expr;
2724   struct insn_link *link;
2725   rtx other_pat = 0;
2726   rtx new_other_notes;
2727   int i;
2728   scalar_int_mode dest_mode, temp_mode;
2729 
2730   /* Immediately return if any of I0, I1 or I2 are the same insn (I3 can
2731      never be).  */
2732   if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2733     return 0;
2734 
2735   /* Only try four-insn combinations when there's high likelihood of
2736      success.  Look for simple insns, such as loads of constants or
2737      binary operations involving a constant.  */
2738   if (i0)
2739     {
2740       int i;
2741       int ngood = 0;
2742       int nshift = 0;
2743       rtx set0, set3;
2744 
2745       if (!flag_expensive_optimizations)
2746 	return 0;
2747 
2748       for (i = 0; i < 4; i++)
2749 	{
2750 	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2751 	  rtx set = single_set (insn);
2752 	  rtx src;
2753 	  if (!set)
2754 	    continue;
2755 	  src = SET_SRC (set);
2756 	  if (CONSTANT_P (src))
2757 	    {
2758 	      ngood += 2;
2759 	      break;
2760 	    }
2761 	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2762 	    ngood++;
2763 	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2764 		   || GET_CODE (src) == LSHIFTRT)
2765 	    nshift++;
2766 	}
2767 
2768       /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2769 	 are likely manipulating its value.  Ideally we'll be able to combine
2770 	 all four insns into a bitfield insertion of some kind.
2771 
2772 	 Note the source in I0 might be inside a sign/zero extension and the
2773 	 memory modes in I0 and I3 might be different.  So extract the address
2774 	 from the destination of I3 and search for it in the source of I0.
2775 
2776 	 In the event that there's a match but the source/dest do not actually
2777 	 refer to the same memory, the worst that happens is we try some
2778 	 combinations that we wouldn't have otherwise.  */
2779       if ((set0 = single_set (i0))
2780 	  /* Ensure the source of SET0 is a MEM, possibly buried inside
2781 	     an extension.  */
2782 	  && (GET_CODE (SET_SRC (set0)) == MEM
2783 	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2784 		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2785 		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2786 	  && (set3 = single_set (i3))
2787 	  /* Ensure the destination of SET3 is a MEM.  */
2788 	  && GET_CODE (SET_DEST (set3)) == MEM
2789 	  /* Would it be better to extract the base address for the MEM
2790 	     in SET3 and look for that?  I don't have cases where it matters
2791 	     but I could envision such cases.  */
2792 	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2793 	ngood += 2;
2794 
2795       if (ngood < 2 && nshift < 2)
2796 	return 0;
2797     }
2798 
2799   /* Exit early if one of the insns involved can't be used for
2800      combinations.  */
2801   if (CALL_P (i2)
2802       || (i1 && CALL_P (i1))
2803       || (i0 && CALL_P (i0))
2804       || cant_combine_insn_p (i3)
2805       || cant_combine_insn_p (i2)
2806       || (i1 && cant_combine_insn_p (i1))
2807       || (i0 && cant_combine_insn_p (i0))
2808       || likely_spilled_retval_p (i3))
2809     return 0;
2810 
2811   combine_attempts++;
2812   undobuf.other_insn = 0;
2813 
2814   /* Reset the hard register usage information.  */
2815   CLEAR_HARD_REG_SET (newpat_used_regs);
2816 
2817   if (dump_file && (dump_flags & TDF_DETAILS))
2818     {
2819       if (i0)
2820 	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2821 		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2822       else if (i1)
2823 	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2824 		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2825       else
2826 	fprintf (dump_file, "\nTrying %d -> %d:\n",
2827 		 INSN_UID (i2), INSN_UID (i3));
2828 
2829       if (i0)
2830 	dump_insn_slim (dump_file, i0);
2831       if (i1)
2832 	dump_insn_slim (dump_file, i1);
2833       dump_insn_slim (dump_file, i2);
2834       dump_insn_slim (dump_file, i3);
2835     }
2836 
2837   /* If multiple insns feed into one of I2 or I3, they can be in any
2838      order.  To simplify the code below, reorder them in sequence.  */
2839   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2840     std::swap (i0, i2);
2841   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2842     std::swap (i0, i1);
2843   if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2844     std::swap (i1, i2);
2845 
2846   added_links_insn = 0;
2847   added_notes_insn = 0;
2848 
2849   /* First check for one important special case that the code below will
2850      not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
2851      and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
2852      we may be able to replace that destination with the destination of I3.
2853      This occurs in the common code where we compute both a quotient and
2854      remainder into a structure, in which case we want to do the computation
2855      directly into the structure to avoid register-register copies.
2856 
2857      Note that this case handles both multiple sets in I2 and also cases
2858      where I2 has a number of CLOBBERs inside the PARALLEL.
2859 
2860      We make very conservative checks below and only try to handle the
2861      most common cases of this.  For example, we only handle the case
2862      where I2 and I3 are adjacent to avoid making difficult register
2863      usage tests.  */
2864 
2865   if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2866       && REG_P (SET_SRC (PATTERN (i3)))
2867       && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2868       && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2869       && GET_CODE (PATTERN (i2)) == PARALLEL
2870       && ! side_effects_p (SET_DEST (PATTERN (i3)))
2871       /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2872 	 below would need to check what is inside (and reg_overlap_mentioned_p
2873 	 doesn't support those codes anyway).  Don't allow those destinations;
2874 	 the resulting insn isn't likely to be recognized anyway.  */
2875       && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2876       && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2877       && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2878 				    SET_DEST (PATTERN (i3)))
2879       && next_active_insn (i2) == i3)
2880     {
2881       rtx p2 = PATTERN (i2);
2882 
2883       /* Make sure that the destination of I3,
2884 	 which we are going to substitute into one output of I2,
2885 	 is not used within another output of I2.  We must avoid making this:
2886 	 (parallel [(set (mem (reg 69)) ...)
2887 		    (set (reg 69) ...)])
2888 	 which is not well-defined as to order of actions.
2889 	 (Besides, reload can't handle output reloads for this.)
2890 
2891 	 The problem can also happen if the dest of I3 is a memory ref,
2892 	 if another dest in I2 is an indirect memory ref.
2893 
2894 	 Neither can this PARALLEL be an asm.  We do not allow combining
2895 	 that usually (see can_combine_p), so do not here either.  */
2896       bool ok = true;
2897       for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2898 	{
2899 	  if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2900 	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER
2901 	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER_HIGH)
2902 	      && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2903 					  SET_DEST (XVECEXP (p2, 0, i))))
2904 	    ok = false;
2905 	  else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2906 		   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2907 	    ok = false;
2908 	}
2909 
2910       if (ok)
2911 	for (i = 0; i < XVECLEN (p2, 0); i++)
2912 	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2913 	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2914 	    {
2915 	      combine_merges++;
2916 
2917 	      subst_insn = i3;
2918 	      subst_low_luid = DF_INSN_LUID (i2);
2919 
2920 	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
2921 	      i2src = SET_SRC (XVECEXP (p2, 0, i));
2922 	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
2923 	      i2dest_killed = dead_or_set_p (i2, i2dest);
2924 
2925 	      /* Replace the dest in I2 with our dest and make the resulting
2926 		 insn the new pattern for I3.  Then skip to where we validate
2927 		 the pattern.  Everything was set up above.  */
2928 	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2929 	      newpat = p2;
2930 	      i3_subst_into_i2 = 1;
2931 	      goto validate_replacement;
2932 	    }
2933     }
2934 
2935   /* If I2 is setting a pseudo to a constant and I3 is setting some
2936      sub-part of it to another constant, merge them by making a new
2937      constant.  */
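  /* An illustrative sketch (assuming a little-endian lowpart):
	(set (reg:SI 100) (const_int 0x12345678))
	(set (subreg:HI (reg:SI 100) 0) (const_int 0xabcd))
     becomes a single (set (reg:SI 100) (const_int 0x1234abcd)).  */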
2938   if (i1 == 0
2939       && (temp_expr = single_set (i2)) != 0
2940       && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2941       && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2942       && GET_CODE (PATTERN (i3)) == SET
2943       && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2944       && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2945     {
2946       rtx dest = SET_DEST (PATTERN (i3));
2947       rtx temp_dest = SET_DEST (temp_expr);
2948       int offset = -1;
2949       int width = 0;
2950 
2951       if (GET_CODE (dest) == ZERO_EXTRACT)
2952 	{
2953 	  if (CONST_INT_P (XEXP (dest, 1))
2954 	      && CONST_INT_P (XEXP (dest, 2))
2955 	      && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2956 					 &dest_mode))
2957 	    {
2958 	      width = INTVAL (XEXP (dest, 1));
2959 	      offset = INTVAL (XEXP (dest, 2));
2960 	      dest = XEXP (dest, 0);
2961 	      if (BITS_BIG_ENDIAN)
2962 		offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2963 	    }
2964 	}
2965       else
2966 	{
2967 	  if (GET_CODE (dest) == STRICT_LOW_PART)
2968 	    dest = XEXP (dest, 0);
2969 	  if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2970 	    {
2971 	      width = GET_MODE_PRECISION (dest_mode);
2972 	      offset = 0;
2973 	    }
2974 	}
2975 
2976       if (offset >= 0)
2977 	{
2978 	  /* If this is the low part, we're done.  */
2979 	  if (subreg_lowpart_p (dest))
2980 	    ;
2981 	  /* Handle the case where inner is twice the size of outer.  */
2982 	  else if (GET_MODE_PRECISION (temp_mode)
2983 		   == 2 * GET_MODE_PRECISION (dest_mode))
2984 	    offset += GET_MODE_PRECISION (dest_mode);
2985 	  /* Otherwise give up for now.  */
2986 	  else
2987 	    offset = -1;
2988 	}
2989 
2990       if (offset >= 0)
2991 	{
2992 	  rtx inner = SET_SRC (PATTERN (i3));
2993 	  rtx outer = SET_SRC (temp_expr);
2994 
2995 	  wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2996 				   rtx_mode_t (inner, dest_mode),
2997 				   offset, width);
2998 
2999 	  combine_merges++;
3000 	  subst_insn = i3;
3001 	  subst_low_luid = DF_INSN_LUID (i2);
3002 	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
3003 	  i2dest = temp_dest;
3004 	  i2dest_killed = dead_or_set_p (i2, i2dest);
3005 
3006 	  /* Replace the source in I2 with the new constant and make the
3007 	     resulting insn the new pattern for I3.  Then skip to where we
3008 	     validate the pattern.  Everything was set up above.  */
3009 	  SUBST (SET_SRC (temp_expr),
3010 		 immed_wide_int_const (o, temp_mode));
3011 
3012 	  newpat = PATTERN (i2);
3013 
3014           /* The dest of I3 has been replaced with the dest of I2.  */
3015           changed_i3_dest = 1;
3016 	  goto validate_replacement;
3017 	}
3018     }
3019 
3020   /* If we have no I1 and I2 looks like:
3021 	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
3022 		   (set Y OP)])
3023      make up a dummy I1 that is
3024 	(set Y OP)
3025      and change I2 to be
3026 	(set (reg:CC X) (compare:CC Y (const_int 0)))
3027 
3028      (We can ignore any trailing CLOBBERs.)
3029 
3030      This undoes a previous combination and allows us to match a branch-and-
3031      decrement insn.  */
3032 
3033   if (!HAVE_cc0 && i1 == 0
3034       && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3035       && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
3036 	  == MODE_CC)
3037       && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
3038       && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
3039       && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
3040 		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
3041       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3042       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3043     {
3044       /* We make I1 with the same INSN_UID as I2.  This gives it
3045 	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
3046 	 never appear in the insn stream so giving it the same INSN_UID
3047 	 as I2 will not cause a problem.  */
3048 
3049       i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3050 			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3051 			 -1, NULL_RTX);
3052       INSN_UID (i1) = INSN_UID (i2);
3053 
3054       SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3055       SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3056 	     SET_DEST (PATTERN (i1)));
3057       unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3058       SUBST_LINK (LOG_LINKS (i2),
3059 		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3060     }
3061 
3062   /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3063      make those two SETs separate I1 and I2 insns, and make an I0 that is
3064      the original I1.  */
3065   if (!HAVE_cc0 && i0 == 0
3066       && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3067       && can_split_parallel_of_n_reg_sets (i2, 2)
3068       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3069       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3070       && !reg_set_between_p  (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3071       && !reg_set_between_p  (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3072     {
3073       /* If there is no I1, there is no I0 either.  */
3074       i0 = i1;
3075 
3076       /* We make I1 with the same INSN_UID as I2.  This gives it
3077 	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
3078 	 never appear in the insn stream so giving it the same INSN_UID
3079 	 as I2 will not cause a problem.  */
3080 
3081       i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3082 			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3083 			 -1, NULL_RTX);
3084       INSN_UID (i1) = INSN_UID (i2);
3085 
3086       SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3087     }
3088 
3089   /* Verify that I2 and maybe I1 and I0 can be combined into I3.  */
3090   if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
3091     {
3092       if (dump_file && (dump_flags & TDF_DETAILS))
3093 	fprintf (dump_file, "Can't combine i2 into i3\n");
3094       undo_all ();
3095       return 0;
3096     }
3097   if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
3098     {
3099       if (dump_file && (dump_flags & TDF_DETAILS))
3100 	fprintf (dump_file, "Can't combine i1 into i3\n");
3101       undo_all ();
3102       return 0;
3103     }
3104   if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
3105     {
3106       if (dump_file && (dump_flags & TDF_DETAILS))
3107 	fprintf (dump_file, "Can't combine i0 into i3\n");
3108       undo_all ();
3109       return 0;
3110     }
3111 
3112   /* Record whether i2 and i3 are trivial moves.  */
3113   i2_was_move = is_just_move (i2);
3114   i3_was_move = is_just_move (i3);
3115 
3116   /* Record whether I2DEST is used in I2SRC and similarly for the other
3117      cases.  Knowing this will help in register status updating below.  */
3118   i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3119   i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3120   i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3121   i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3122   i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3123   i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3124   i2dest_killed = dead_or_set_p (i2, i2dest);
3125   i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3126   i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3127 
3128   /* For the earlier insns, determine which of the subsequent ones they
3129      feed.  */
3130   i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3131   i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3132   i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3133 			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
3134 			     && reg_overlap_mentioned_p (i0dest, i2src))));
3135 
3136   /* Ensure that I3's pattern can be the destination of combines.  */
3137   if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3138 			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3139 			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3140 				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3141 			  &i3dest_killed))
3142     {
3143       undo_all ();
3144       return 0;
3145     }
3146 
3147   /* See if any of the insns is a MULT operation.  Unless one is, we will
3148      reject a combination whose result is a MULT, since it must be slower.
3149      Be conservative here.  */
3150   if (GET_CODE (i2src) == MULT
3151       || (i1 != 0 && GET_CODE (i1src) == MULT)
3152       || (i0 != 0 && GET_CODE (i0src) == MULT)
3153       || (GET_CODE (PATTERN (i3)) == SET
3154 	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3155     have_mult = 1;
3156 
3157   /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3158      We used to do this EXCEPT in one case: I3 has a post-inc in an
3159      output operand.  However, that exception can give rise to insns like
3160 	mov r3,(r3)+
3161      which is a famous insn on the PDP-11 where the value of r3 used as the
3162      source was model-dependent.  Avoid this sort of thing.  */
3163 
3164 #if 0
3165   if (!(GET_CODE (PATTERN (i3)) == SET
3166 	&& REG_P (SET_SRC (PATTERN (i3)))
3167 	&& MEM_P (SET_DEST (PATTERN (i3)))
3168 	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3169 	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3170     /* It's not the exception.  */
3171 #endif
3172     if (AUTO_INC_DEC)
3173       {
3174 	rtx link;
3175 	for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3176 	  if (REG_NOTE_KIND (link) == REG_INC
3177 	      && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3178 		  || (i1 != 0
3179 		      && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3180 	    {
3181 	      undo_all ();
3182 	      return 0;
3183 	    }
3184       }
3185 
3186   /* See if the SETs in I1 or I2 need to be kept around in the merged
3187      instruction: whenever the value set there is still needed past I3.
3188      For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3189 
3190      For the SET in I1, we have two cases: if I1 and I2 independently feed
3191      into I3, the set in I1 needs to be kept around unless I1DEST dies
3192      or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
3193      in I1 needs to be kept around unless I1DEST dies or is set in either
3194      I2 or I3.  The same considerations apply to I0.  */
3195 
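       /* A hypothetical example (registers made up for illustration): if I2
          is (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4))) and
          (reg:SI 100) is used again after I3, it neither dies nor is set in
          I3, so ADDED_SETS_2 below is nonzero and the merged pattern will
          keep a copy of I2's SET.  */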
3196   added_sets_2 = !dead_or_set_p (i3, i2dest);
3197 
3198   if (i1)
3199     added_sets_1 = !(dead_or_set_p (i3, i1dest)
3200 		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3201   else
3202     added_sets_1 = 0;
3203 
3204   if (i0)
3205     added_sets_0 =  !(dead_or_set_p (i3, i0dest)
3206 		      || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3207 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3208 			  && dead_or_set_p (i2, i0dest)));
3209   else
3210     added_sets_0 = 0;
3211 
3212   /* We are about to copy insns for the case where they need to be kept
3213      around.  Check that they can be copied in the merged instruction.  */
3214 
3215   if (targetm.cannot_copy_insn_p
3216       && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3217 	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3218 	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3219     {
3220       undo_all ();
3221       return 0;
3222     }
3223 
3224   /* Count how many auto_inc expressions there were in the original insns;
3225      we need to have the same number in the resulting patterns.  */
3226 
3227   if (i0)
3228     for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
3229   if (i1)
3230     for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
3231   for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
3232   for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3233 
3234   /* If the set in I2 needs to be kept around, we must make a copy of
3235      PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3236      PATTERN (I2), we are only substituting for the original I1DEST, not into
3237      an already-substituted copy.  This also prevents making self-referential
3238      rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3239      I2DEST.  */
3240 
3241   if (added_sets_2)
3242     {
3243       if (GET_CODE (PATTERN (i2)) == PARALLEL)
3244 	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3245       else
3246 	i2pat = copy_rtx (PATTERN (i2));
3247     }
3248 
3249   if (added_sets_1)
3250     {
3251       if (GET_CODE (PATTERN (i1)) == PARALLEL)
3252 	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3253       else
3254 	i1pat = copy_rtx (PATTERN (i1));
3255     }
3256 
3257   if (added_sets_0)
3258     {
3259       if (GET_CODE (PATTERN (i0)) == PARALLEL)
3260 	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3261       else
3262 	i0pat = copy_rtx (PATTERN (i0));
3263     }
3264 
3265   combine_merges++;
3266 
3267   /* Substitute in the latest insn for the regs set by the earlier ones.  */
3268 
3269   maxreg = max_reg_num ();
3270 
3271   subst_insn = i3;
3272 
3273   /* Many machines that don't use CC0 have insns that can both perform an
3274      arithmetic operation and set the condition code.  These operations will
3275      be represented as a PARALLEL with the first element of the vector
3276      being a COMPARE of an arithmetic operation with the constant zero.
3277      The second element of the vector will set some pseudo to the result
3278      of the same arithmetic operation.  If we simplify the COMPARE, we won't
3279      match such a pattern and so will generate an extra insn.   Here we test
3280      for this case, where both the comparison and the operation result are
3281      needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3282      I2SRC.  Later we will make the PARALLEL that contains I2.  */
3283 
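       /* For illustration only (hypothetical RTL): if I2 is
          (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 1))) and I3 is
          (set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0))), the code
          below turns I3's source into (compare:CC (plus:SI (reg:SI 101)
          (const_int 1)) (const_int 0)); the PARALLEL that also keeps I2's
          SET is built later, when the added sets are handled.  */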
3284   if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3285       && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3286       && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3287       && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3288     {
3289       rtx newpat_dest;
3290       rtx *cc_use_loc = NULL;
3291       rtx_insn *cc_use_insn = NULL;
3292       rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3293       machine_mode compare_mode, orig_compare_mode;
3294       enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3295       scalar_int_mode mode;
3296 
3297       newpat = PATTERN (i3);
3298       newpat_dest = SET_DEST (newpat);
3299       compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3300 
3301       if (undobuf.other_insn == 0
3302 	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3303 					    &cc_use_insn)))
3304 	{
3305 	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3306 	  if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3307 	    compare_code = simplify_compare_const (compare_code, mode,
3308 						   op0, &op1);
3309 	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3310 	}
3311 
3312       /* Do the rest only if op1 is const0_rtx, which may be the
3313 	 result of simplification.  */
3314       if (op1 == const0_rtx)
3315 	{
3316 	  /* If a single use of the CC is found, prepare to modify it
3317 	     when SELECT_CC_MODE returns a new CC-class mode, or when
3318 	     the above simplify_compare_const() returned a new comparison
3319 	     operator.  undobuf.other_insn is assigned the CC use insn
3320 	     when modifying it.  */
3321 	  if (cc_use_loc)
3322 	    {
3323 #ifdef SELECT_CC_MODE
3324 	      machine_mode new_mode
3325 		= SELECT_CC_MODE (compare_code, op0, op1);
3326 	      if (new_mode != orig_compare_mode
3327 		  && can_change_dest_mode (SET_DEST (newpat),
3328 					   added_sets_2, new_mode))
3329 		{
3330 		  unsigned int regno = REGNO (newpat_dest);
3331 		  compare_mode = new_mode;
3332 		  if (regno < FIRST_PSEUDO_REGISTER)
3333 		    newpat_dest = gen_rtx_REG (compare_mode, regno);
3334 		  else
3335 		    {
3336 		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3337 		      newpat_dest = regno_reg_rtx[regno];
3338 		    }
3339 		}
3340 #endif
3341 	      /* Cases for modifying the CC-using comparison.  */
3342 	      if (compare_code != orig_compare_code
3343 		  /* ??? Do we need to verify the zero rtx?  */
3344 		  && XEXP (*cc_use_loc, 1) == const0_rtx)
3345 		{
3346 		  /* Replace cc_use_loc with entire new RTX.  */
3347 		  SUBST (*cc_use_loc,
3348 			 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3349 					 newpat_dest, const0_rtx));
3350 		  undobuf.other_insn = cc_use_insn;
3351 		}
3352 	      else if (compare_mode != orig_compare_mode)
3353 		{
3354 		  /* Just replace the CC reg with a new mode.  */
3355 		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3356 		  undobuf.other_insn = cc_use_insn;
3357 		}
3358 	    }
3359 
3360 	  /* Now we modify the current newpat:
3361 	     First, SET_DEST(newpat) is updated if the CC mode has been
3362 	     altered. For targets without SELECT_CC_MODE, this should be
3363 	     optimized away.  */
3364 	  if (compare_mode != orig_compare_mode)
3365 	    SUBST (SET_DEST (newpat), newpat_dest);
3366 	  /* This is always done to propagate i2src into newpat.  */
3367 	  SUBST (SET_SRC (newpat),
3368 		 gen_rtx_COMPARE (compare_mode, op0, op1));
3369 	  /* Create new version of i2pat if needed; the below PARALLEL
3370 	     creation needs this to work correctly.  */
3371 	  if (! rtx_equal_p (i2src, op0))
3372 	    i2pat = gen_rtx_SET (i2dest, op0);
3373 	  i2_is_used = 1;
3374 	}
3375     }
3376 
3377   if (i2_is_used == 0)
3378     {
3379       /* It is possible that the source of I2 or I1 may be performing
3380 	 an unneeded operation, such as a ZERO_EXTEND of something
3381 	 that is known to have the high part zero.  Handle that case
3382 	 by letting subst look at the inner insns.
3383 
3384 	 Another way to do this would be to have a function that tries
3385 	 to simplify a single insn instead of merging two or more
3386 	 insns.  We don't do this because of the potential for infinite
3387 	 loops and because of the potential extra memory required.
3388 	 However, doing it the way we are is a bit of a kludge and
3389 	 doesn't catch all cases.
3390 
3391 	 But only do this if -fexpensive-optimizations since it slows
3392 	 things down and doesn't usually win.
3393 
3394 	 This is not done in the COMPARE case above because the
3395 	 unmodified I2PAT is used in the PARALLEL and so a pattern
3396 	 with a modified I2SRC would not match.  */
3397 
3398       if (flag_expensive_optimizations)
3399 	{
3400 	  /* Pass pc_rtx so no substitutions are done, just
3401 	     simplifications.  */
3402 	  if (i1)
3403 	    {
3404 	      subst_low_luid = DF_INSN_LUID (i1);
3405 	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3406 	    }
3407 
3408 	  subst_low_luid = DF_INSN_LUID (i2);
3409 	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3410 	}
3411 
3412       n_occurrences = 0;		/* `subst' counts here */
3413       subst_low_luid = DF_INSN_LUID (i2);
3414 
3415       /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3416 	 copy of I2SRC each time we substitute it, in order to avoid creating
3417 	 self-referential RTL when we will be substituting I1SRC for I1DEST
3418 	 later.  Likewise if I0 feeds into I2, either directly or indirectly
3419 	 through I1, and I0DEST is in I0SRC.  */
3420       newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3421 		      (i1_feeds_i2_n && i1dest_in_i1src)
3422 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3423 			  && i0dest_in_i0src));
3424       substed_i2 = 1;
3425 
3426       /* Record whether I2's body now appears within I3's body.  */
3427       i2_is_used = n_occurrences;
3428     }
3429 
3430   /* If we already got a failure, don't try to do more.  Otherwise, try to
3431      substitute I1 if we have it.  */
3432 
3433   if (i1 && GET_CODE (newpat) != CLOBBER)
3434     {
3435       /* Before we can do this substitution, we must redo the test done
3436 	 above (see detailed comments there) that ensures I1DEST isn't
3437 	 mentioned in any SETs in NEWPAT that are field assignments.  */
3438       if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3439 			     0, 0, 0))
3440 	{
3441 	  undo_all ();
3442 	  return 0;
3443 	}
3444 
3445       n_occurrences = 0;
3446       subst_low_luid = DF_INSN_LUID (i1);
3447 
3448       /* If the following substitution will modify I1SRC, make a copy of it
3449 	 for the case where it is substituted for I1DEST in I2PAT later.  */
3450       if (added_sets_2 && i1_feeds_i2_n)
3451 	i1src_copy = copy_rtx (i1src);
3452 
3453       /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3454 	 copy of I1SRC each time we substitute it, in order to avoid creating
3455 	 self-referential RTL when we will be substituting I0SRC for I0DEST
3456 	 later.  */
3457       newpat = subst (newpat, i1dest, i1src, 0, 0,
3458 		      i0_feeds_i1_n && i0dest_in_i0src);
3459       substed_i1 = 1;
3460 
3461       /* Record whether I1's body now appears within I3's body.  */
3462       i1_is_used = n_occurrences;
3463     }
3464 
3465   /* Likewise for I0 if we have it.  */
3466 
3467   if (i0 && GET_CODE (newpat) != CLOBBER)
3468     {
3469       if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3470 			     0, 0, 0))
3471 	{
3472 	  undo_all ();
3473 	  return 0;
3474 	}
3475 
3476       /* If the following substitution will modify I0SRC, make a copy of it
3477 	 for the case where it is substituted for I0DEST in I1PAT later.  */
3478       if (added_sets_1 && i0_feeds_i1_n)
3479 	i0src_copy = copy_rtx (i0src);
3480       /* And a copy for I0DEST in I2PAT substitution.  */
3481       if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3482 			   || (i0_feeds_i2_n)))
3483 	i0src_copy2 = copy_rtx (i0src);
3484 
3485       n_occurrences = 0;
3486       subst_low_luid = DF_INSN_LUID (i0);
3487       newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3488       substed_i0 = 1;
3489     }
3490 
3491   if (n_auto_inc)
3492     {
3493       int new_n_auto_inc = 0;
3494       for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3495 
3496       if (n_auto_inc != new_n_auto_inc)
3497 	{
3498 	  if (dump_file && (dump_flags & TDF_DETAILS))
3499 	    fprintf (dump_file, "Number of auto_inc expressions changed\n");
3500 	  undo_all ();
3501 	  return 0;
3502 	}
3503     }
3504 
3505   /* Fail if an autoincrement side-effect has been duplicated.  Be careful
3506      to count all the ways that I2SRC and I1SRC can be used.  */
3507   if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3508        && i2_is_used + added_sets_2 > 1)
3509       || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3510 	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3511 	      > 1))
3512       || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3513 	  && (n_occurrences + added_sets_0
3514 	      + (added_sets_1 && i0_feeds_i1_n)
3515 	      + (added_sets_2 && i0_feeds_i2_n)
3516 	      > 1))
3517       /* Fail if we tried to make a new register.  */
3518       || max_reg_num () != maxreg
3519       /* Fail if we couldn't do something and have a CLOBBER.  */
3520       || GET_CODE (newpat) == CLOBBER
3521       /* Fail if this new pattern is a MULT and we didn't have one before
3522 	 at the outer level.  */
3523       || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3524 	  && ! have_mult))
3525     {
3526       undo_all ();
3527       return 0;
3528     }
3529 
3530   /* If the actions of the earlier insns must be kept
3531      in addition to substituting them into the latest one,
3532      we must make a new PARALLEL for the latest insn
3533      to hold the additional SETs.  */
3534 
3535   if (added_sets_0 || added_sets_1 || added_sets_2)
3536     {
3537       int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3538       combine_extras++;
3539 
3540       if (GET_CODE (newpat) == PARALLEL)
3541 	{
3542 	  rtvec old = XVEC (newpat, 0);
3543 	  total_sets = XVECLEN (newpat, 0) + extra_sets;
3544 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3545 	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3546 		  sizeof (old->elem[0]) * old->num_elem);
3547 	}
3548       else
3549 	{
3550 	  rtx old = newpat;
3551 	  total_sets = 1 + extra_sets;
3552 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3553 	  XVECEXP (newpat, 0, 0) = old;
3554 	}
3555 
3556       if (added_sets_0)
3557 	XVECEXP (newpat, 0, --total_sets) = i0pat;
3558 
3559       if (added_sets_1)
3560 	{
3561 	  rtx t = i1pat;
3562 	  if (i0_feeds_i1_n)
3563 	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3564 
3565 	  XVECEXP (newpat, 0, --total_sets) = t;
3566 	}
3567       if (added_sets_2)
3568 	{
3569 	  rtx t = i2pat;
3570 	  if (i1_feeds_i2_n)
3571 	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3572 		       i0_feeds_i1_n && i0dest_in_i0src);
3573 	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3574 	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3575 
3576 	  XVECEXP (newpat, 0, --total_sets) = t;
3577 	}
3578     }
3579 
3580  validate_replacement:
3581 
3582   /* Note which hard regs this insn has as inputs.  */
3583   mark_used_regs_combine (newpat);
3584 
3585   /* If recog_for_combine fails, it strips existing clobbers.  If we'll
3586      consider splitting this pattern, we might need these clobbers.  */
3587   if (i1 && GET_CODE (newpat) == PARALLEL
3588       && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3589     {
3590       int len = XVECLEN (newpat, 0);
3591 
3592       newpat_vec_with_clobbers = rtvec_alloc (len);
3593       for (i = 0; i < len; i++)
3594 	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3595     }
3596 
3597   /* We have recognized nothing yet.  */
3598   insn_code_number = -1;
3599 
3600   /* See if this is a PARALLEL of two SETs where one SET's destination is
3601      a register that is unused and this isn't marked as an instruction that
3602      might trap in an EH region.  In that case, we just need the other SET.
3603      We prefer this over the PARALLEL.
3604 
3605      This can occur when simplifying a divmod insn.  We *must* test for this
3606      case here because the code below that splits two independent SETs doesn't
3607      handle this case correctly when it updates the register status.
3608 
3609      It's pointless doing this if we originally had two sets, one from
3610      i3, and one from i2.  Combining then splitting the parallel results
3611      in the original i2 again plus an invalid insn (which we delete).
3612      The net effect is only to move instructions around, which makes
3613      debug info less accurate.
3614 
3615      If the remaining SET came from I2 its destination should not be used
3616      between I2 and I3.  See PR82024.  */
3617 
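       /* A typical (made-up) instance is a divmod PARALLEL such as
          (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                     (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
          where I3 has a REG_UNUSED note for (reg:SI 101); only the first SET
          is then kept and recognized on its own.  */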
3618   if (!(added_sets_2 && i1 == 0)
3619       && is_parallel_of_n_reg_sets (newpat, 2)
3620       && asm_noperands (newpat) < 0)
3621     {
3622       rtx set0 = XVECEXP (newpat, 0, 0);
3623       rtx set1 = XVECEXP (newpat, 0, 1);
3624       rtx oldpat = newpat;
3625 
3626       if (((REG_P (SET_DEST (set1))
3627 	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3628 	   || (GET_CODE (SET_DEST (set1)) == SUBREG
3629 	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3630 	  && insn_nothrow_p (i3)
3631 	  && !side_effects_p (SET_SRC (set1)))
3632 	{
3633 	  newpat = set0;
3634 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3635 	}
3636 
3637       else if (((REG_P (SET_DEST (set0))
3638 		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3639 		|| (GET_CODE (SET_DEST (set0)) == SUBREG
3640 		    && find_reg_note (i3, REG_UNUSED,
3641 				      SUBREG_REG (SET_DEST (set0)))))
3642 	       && insn_nothrow_p (i3)
3643 	       && !side_effects_p (SET_SRC (set0)))
3644 	{
3645 	  rtx dest = SET_DEST (set1);
3646 	  if (GET_CODE (dest) == SUBREG)
3647 	    dest = SUBREG_REG (dest);
3648 	  if (!reg_used_between_p (dest, i2, i3))
3649 	    {
3650 	      newpat = set1;
3651 	      insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3652 
3653 	      if (insn_code_number >= 0)
3654 		changed_i3_dest = 1;
3655 	    }
3656 	}
3657 
3658       if (insn_code_number < 0)
3659 	newpat = oldpat;
3660     }
3661 
3662   /* Is the result of combination a valid instruction?  */
3663   if (insn_code_number < 0)
3664     insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3665 
3666   /* If we were combining three insns and the result is a simple SET
3667      with no ASM_OPERANDS that wasn't recognized, try to split it into two
3668      insns.  There are two ways to do this.  It can be split using a
3669      machine-specific method (like when you have an addition of a large
3670      constant) or by combine in the function find_split_point.  */
3671 
3672   if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3673       && asm_noperands (newpat) < 0)
3674     {
3675       rtx parallel, *split;
3676       rtx_insn *m_split_insn;
3677 
3678       /* See if the MD file can split NEWPAT.  If it can't, see if letting it
3679 	 use I2DEST as a scratch register will help.  In the latter case,
3680 	 convert I2DEST to the mode of the source of NEWPAT if we can.  */
3681 
3682       m_split_insn = combine_split_insns (newpat, i3);
3683 
3684       /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3685 	 inputs of NEWPAT.  */
3686 
3687       /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3688 	 possible to try that as a scratch reg.  This would require adding
3689 	 more code to make it work though.  */
3690 
3691       if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3692 	{
3693 	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3694 
3695 	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
3696 	     (temporarily, until we are committed to this instruction
3697 	     combination) does not work: for example, any call to nonzero_bits
3698 	     on the register (from a splitter in the MD file, for example)
3699 	     will get the old information, which is invalid.
3700 
3701 	     Since nowadays we can create registers during combine just fine,
3702 	     we should just create a new one here, not reuse i2dest.  */
3703 
3704 	  /* First try to split using the original register as a
3705 	     scratch register.  */
3706 	  parallel = gen_rtx_PARALLEL (VOIDmode,
3707 				       gen_rtvec (2, newpat,
3708 						  gen_rtx_CLOBBER (VOIDmode,
3709 								   i2dest)));
3710 	  m_split_insn = combine_split_insns (parallel, i3);
3711 
3712 	  /* If that didn't work, try changing the mode of I2DEST if
3713 	     we can.  */
3714 	  if (m_split_insn == 0
3715 	      && new_mode != GET_MODE (i2dest)
3716 	      && new_mode != VOIDmode
3717 	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3718 	    {
3719 	      machine_mode old_mode = GET_MODE (i2dest);
3720 	      rtx ni2dest;
3721 
3722 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3723 		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3724 	      else
3725 		{
3726 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3727 		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
3728 		}
3729 
3730 	      parallel = (gen_rtx_PARALLEL
3731 			  (VOIDmode,
3732 			   gen_rtvec (2, newpat,
3733 				      gen_rtx_CLOBBER (VOIDmode,
3734 						       ni2dest))));
3735 	      m_split_insn = combine_split_insns (parallel, i3);
3736 
3737 	      if (m_split_insn == 0
3738 		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3739 		{
3740 		  struct undo *buf;
3741 
3742 		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3743 		  buf = undobuf.undos;
3744 		  undobuf.undos = buf->next;
3745 		  buf->next = undobuf.frees;
3746 		  undobuf.frees = buf;
3747 		}
3748 	    }
3749 
3750 	  i2scratch = m_split_insn != 0;
3751 	}
3752 
3753       /* If recog_for_combine has discarded clobbers, try to use them
3754 	 again for the split.  */
3755       if (m_split_insn == 0 && newpat_vec_with_clobbers)
3756 	{
3757 	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3758 	  m_split_insn = combine_split_insns (parallel, i3);
3759 	}
3760 
3761       if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3762 	{
3763 	  rtx m_split_pat = PATTERN (m_split_insn);
3764 	  insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3765 	  if (insn_code_number >= 0)
3766 	    newpat = m_split_pat;
3767 	}
3768       else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3769 	       && (next_nonnote_nondebug_insn (i2) == i3
3770 		   || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3771 	{
3772 	  rtx i2set, i3set;
3773 	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3774 	  newi2pat = PATTERN (m_split_insn);
3775 
3776 	  i3set = single_set (NEXT_INSN (m_split_insn));
3777 	  i2set = single_set (m_split_insn);
3778 
3779 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3780 
3781 	  /* If I2 or I3 has multiple SETs, we won't know how to track
3782 	     register status, so don't use these insns.  If I2's destination
3783 	     is used between I2 and I3, we also can't use these insns.  */
3784 
3785 	  if (i2_code_number >= 0 && i2set && i3set
3786 	      && (next_nonnote_nondebug_insn (i2) == i3
3787 		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3788 	    insn_code_number = recog_for_combine (&newi3pat, i3,
3789 						  &new_i3_notes);
3790 	  if (insn_code_number >= 0)
3791 	    newpat = newi3pat;
3792 
3793 	  /* It is possible that both insns now set the destination of I3.
3794 	     If so, we must show an extra use of it.  */
3795 
3796 	  if (insn_code_number >= 0)
3797 	    {
3798 	      rtx new_i3_dest = SET_DEST (i3set);
3799 	      rtx new_i2_dest = SET_DEST (i2set);
3800 
3801 	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3802 		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3803 		     || GET_CODE (new_i3_dest) == SUBREG)
3804 		new_i3_dest = XEXP (new_i3_dest, 0);
3805 
3806 	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3807 		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3808 		     || GET_CODE (new_i2_dest) == SUBREG)
3809 		new_i2_dest = XEXP (new_i2_dest, 0);
3810 
3811 	      if (REG_P (new_i3_dest)
3812 		  && REG_P (new_i2_dest)
3813 		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3814 		  && REGNO (new_i2_dest) < reg_n_sets_max)
3815 		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3816 	    }
3817 	}
3818 
3819       /* If we can split it and use I2DEST, go ahead and see if that
3820 	 helps things be recognized.  Verify that none of the registers
3821 	 are set between I2 and I3.  */
3822       if (insn_code_number < 0
3823           && (split = find_split_point (&newpat, i3, false)) != 0
3824 	  && (!HAVE_cc0 || REG_P (i2dest))
3825 	  /* We need I2DEST in the proper mode.  If it is a hard register
3826 	     or the only use of a pseudo, we can change its mode.
3827 	     Make sure we don't change a hard register to have a mode that
3828 	     isn't valid for it, or change the number of registers.  */
3829 	  && (GET_MODE (*split) == GET_MODE (i2dest)
3830 	      || GET_MODE (*split) == VOIDmode
3831 	      || can_change_dest_mode (i2dest, added_sets_2,
3832 				       GET_MODE (*split)))
3833 	  && (next_nonnote_nondebug_insn (i2) == i3
3834 	      || !modified_between_p (*split, i2, i3))
3835 	  /* We can't overwrite I2DEST if its value is still used by
3836 	     NEWPAT.  */
3837 	  && ! reg_referenced_p (i2dest, newpat))
3838 	{
3839 	  rtx newdest = i2dest;
3840 	  enum rtx_code split_code = GET_CODE (*split);
3841 	  machine_mode split_mode = GET_MODE (*split);
3842 	  bool subst_done = false;
3843 	  newi2pat = NULL_RTX;
3844 
3845 	  i2scratch = true;
3846 
3847 	  /* *SPLIT may be part of I2SRC, so make sure we have the
3848 	     original expression around for later debug processing.
3849 	     We should not need I2SRC any more in other cases.  */
3850 	  if (MAY_HAVE_DEBUG_BIND_INSNS)
3851 	    i2src = copy_rtx (i2src);
3852 	  else
3853 	    i2src = NULL;
3854 
3855 	  /* Get NEWDEST as a register in the proper mode.  We have already
3856 	     validated that we can do this.  */
3857 	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3858 	    {
3859 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3860 		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3861 	      else
3862 		{
3863 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3864 		  newdest = regno_reg_rtx[REGNO (i2dest)];
3865 		}
3866 	    }
3867 
3868 	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3869 	     an ASHIFT.  This can occur if it was inside a PLUS and hence
3870 	     appeared to be a memory address.  This is a kludge.  */
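     	  /* E.g. (with a made-up operand),
     	     (mult:SI (reg:SI 100) (const_int 8)) becomes
     	     (ashift:SI (reg:SI 100) (const_int 3)), since 8 == 1 << 3.  */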
3871 	  if (split_code == MULT
3872 	      && CONST_INT_P (XEXP (*split, 1))
3873 	      && INTVAL (XEXP (*split, 1)) > 0
3874 	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3875 	    {
3876 	      rtx i_rtx = gen_int_shift_amount (split_mode, i);
3877 	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
3878 					     XEXP (*split, 0), i_rtx));
3879 	      /* Update split_code because we may not have a multiply
3880 		 anymore.  */
3881 	      split_code = GET_CODE (*split);
3882 	    }
3883 
3884 	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
3885 	  if (split_code == PLUS
3886 	      && GET_CODE (XEXP (*split, 0)) == MULT
3887 	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3888 	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3889 	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3890 	    {
3891 	      rtx nsplit = XEXP (*split, 0);
3892 	      rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3893 	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3894 						       XEXP (nsplit, 0),
3895 						       i_rtx));
3896 	      /* Update split_code because we may not have a multiply
3897 		 anymore.  */
3898 	      split_code = GET_CODE (*split);
3899 	    }
3900 
3901 #ifdef INSN_SCHEDULING
3902 	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3903 	     be written as a ZERO_EXTEND.  */
3904 	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3905 	    {
3906 	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3907 		 what it really is.  */
3908 	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3909 		  == SIGN_EXTEND)
3910 		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3911 						    SUBREG_REG (*split)));
3912 	      else
3913 		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3914 						    SUBREG_REG (*split)));
3915 	    }
3916 #endif
3917 
3918 	  /* Attempt to split binary operators using arithmetic identities.  */
3919 	  if (BINARY_P (SET_SRC (newpat))
3920 	      && split_mode == GET_MODE (SET_SRC (newpat))
3921 	      && ! side_effects_p (SET_SRC (newpat)))
3922 	    {
3923 	      rtx setsrc = SET_SRC (newpat);
3924 	      machine_mode mode = GET_MODE (setsrc);
3925 	      enum rtx_code code = GET_CODE (setsrc);
3926 	      rtx src_op0 = XEXP (setsrc, 0);
3927 	      rtx src_op1 = XEXP (setsrc, 1);
3928 
3929 	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
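     	      /* As an illustration (hypothetical registers):
     		 (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 101)))
     		 becomes NEWI2PAT == (set NEWDEST (reg:SI 101)) and NEWPAT's
     		 source becomes (plus:SI NEWDEST NEWDEST).  */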
3930 	      if (rtx_equal_p (src_op0, src_op1))
3931 		{
3932 		  newi2pat = gen_rtx_SET (newdest, src_op0);
3933 		  SUBST (XEXP (setsrc, 0), newdest);
3934 		  SUBST (XEXP (setsrc, 1), newdest);
3935 		  subst_done = true;
3936 		}
3937 	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
3938 	      else if ((code == PLUS || code == MULT)
3939 		       && GET_CODE (src_op0) == code
3940 		       && GET_CODE (XEXP (src_op0, 0)) == code
3941 		       && (INTEGRAL_MODE_P (mode)
3942 			   || (FLOAT_MODE_P (mode)
3943 			       && flag_unsafe_math_optimizations)))
3944 		{
3945 		  rtx p = XEXP (XEXP (src_op0, 0), 0);
3946 		  rtx q = XEXP (XEXP (src_op0, 0), 1);
3947 		  rtx r = XEXP (src_op0, 1);
3948 		  rtx s = src_op1;
3949 
3950 		  /* Split both "((X op Y) op X) op Y" and
3951 		     "((X op Y) op Y) op X" as "T op T" where T is
3952 		     "X op Y".  */
3953 		  if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3954 		       || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3955 		    {
3956 		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3957 		      SUBST (XEXP (setsrc, 0), newdest);
3958 		      SUBST (XEXP (setsrc, 1), newdest);
3959 		      subst_done = true;
3960 		    }
3961 		  /* Split "((X op X) op Y) op Y" as "T op T" where
3962 		     T is "X op Y".  */
3963 		  else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3964 		    {
3965 		      rtx tmp = simplify_gen_binary (code, mode, p, r);
3966 		      newi2pat = gen_rtx_SET (newdest, tmp);
3967 		      SUBST (XEXP (setsrc, 0), newdest);
3968 		      SUBST (XEXP (setsrc, 1), newdest);
3969 		      subst_done = true;
3970 		    }
3971 		}
3972 	    }
3973 
3974 	  if (!subst_done)
3975 	    {
3976 	      newi2pat = gen_rtx_SET (newdest, *split);
3977 	      SUBST (*split, newdest);
3978 	    }
3979 
3980 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3981 
3982 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
3983 	     Make sure NEWPAT does not depend on the clobbered regs.  */
3984 	  if (GET_CODE (newi2pat) == PARALLEL)
3985 	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3986 	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3987 		{
3988 		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3989 		  if (reg_overlap_mentioned_p (reg, newpat))
3990 		    {
3991 		      undo_all ();
3992 		      return 0;
3993 		    }
3994 		}
3995 
3996 	  /* If the split point was a MULT and we didn't have one before,
3997 	     don't use one now.  */
3998 	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3999 	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4000 	}
4001     }
4002 
4003   /* Check for a case where we loaded from memory in a narrow mode and
4004      then sign extended it, but we need both registers.  In that case,
4005      we have a PARALLEL with both loads from the same memory location.
4006      We can split this into a load from memory followed by a register-register
4007      copy.  This saves at least one insn, more if register allocation can
4008      eliminate the copy.
4009 
4010      We cannot do this if the destination of the first assignment is a
4011      condition code register or cc0.  We eliminate this case by making sure
4012      the SET_DEST and SET_SRC have the same mode.
4013 
4014      We cannot do this if the destination of the second assignment is
4015      a register that we have already assumed is zero-extended.  Similarly
4016      for a SUBREG of such a register.  */
4017 
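       /* A sketch of the shape handled here (registers are hypothetical):
          (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI (reg:SI 102))))
                     (set (reg:HI 101) (mem:HI (reg:SI 102)))])
          is split so that NEWI2PAT does the extending load and NEWPAT copies
          the low part of (reg:SI 100) into (reg:HI 101).  */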
4018   else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
4019 	   && GET_CODE (newpat) == PARALLEL
4020 	   && XVECLEN (newpat, 0) == 2
4021 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4022 	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
4023 	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
4024 	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
4025 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4026 	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
4027 			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
4028 	   && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
4029 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4030 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4031 	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
4032 		 (REG_P (temp_expr)
4033 		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4034 		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4035 			       BITS_PER_WORD)
4036 		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4037 			       HOST_BITS_PER_INT)
4038 		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
4039 		      != GET_MODE_MASK (word_mode))))
4040 	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
4041 		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
4042 		     (REG_P (temp_expr)
4043 		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4044 		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4045 				   BITS_PER_WORD)
4046 		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4047 				   HOST_BITS_PER_INT)
4048 		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
4049 			  != GET_MODE_MASK (word_mode)))))
4050 	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4051 					 SET_SRC (XVECEXP (newpat, 0, 1)))
4052 	   && ! find_reg_note (i3, REG_UNUSED,
4053 			       SET_DEST (XVECEXP (newpat, 0, 0))))
4054     {
4055       rtx ni2dest;
4056 
4057       newi2pat = XVECEXP (newpat, 0, 0);
4058       ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
4059       newpat = XVECEXP (newpat, 0, 1);
4060       SUBST (SET_SRC (newpat),
4061 	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4062       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4063 
4064       if (i2_code_number >= 0)
4065 	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4066 
4067       if (insn_code_number >= 0)
4068 	swap_i2i3 = 1;
4069     }
4070 
4071   /* Similarly, check for a case where we have a PARALLEL of two independent
4072      SETs but we started with three insns.  In this case, we can do the sets
4073      as two separate insns.  This case occurs when some SET allows two
4074      other insns to combine, but the destination of that SET is still live.
4075 
4076      Also do this if we started with two insns and (at least) one of the
4077      resulting sets is a noop; this noop will be deleted later.
4078 
4079      Also do this if we started with two insns neither of which was a simple
4080      move.  */
4081 
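       /* E.g. (made-up registers) a PARALLEL such as
          (parallel [(set (reg:SI 100) (plus:SI (reg:SI 102) (const_int 4)))
                     (set (reg:SI 101) (reg:SI 102))])
          can be emitted as two separate insns, one at I2 and one at I3, in
          whichever order the checks below allow.  */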
4082   else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4083 	   && GET_CODE (newpat) == PARALLEL
4084 	   && XVECLEN (newpat, 0) == 2
4085 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4086 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4087 	   && (i1
4088 	       || set_noop_p (XVECEXP (newpat, 0, 0))
4089 	       || set_noop_p (XVECEXP (newpat, 0, 1))
4090 	       || (!i2_was_move && !i3_was_move))
4091 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4092 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4093 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4094 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4095 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4096 				  XVECEXP (newpat, 0, 0))
4097 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4098 				  XVECEXP (newpat, 0, 1))
4099 	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4100 		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4101     {
4102       rtx set0 = XVECEXP (newpat, 0, 0);
4103       rtx set1 = XVECEXP (newpat, 0, 1);
4104 
4105       /* Normally, it doesn't matter which of the two is done first,
4106 	 but the one that references cc0 can't be the second, and
4107 	 one which uses any regs/memory set in between i2 and i3 can't
4108 	 be first.  The PARALLEL might also have been pre-existing in i3,
4109 	 so we need to make sure that we won't wrongly hoist a SET to i2
4110 	 that would conflict with a death note present in there, or would
4111 	 have its dest modified between i2 and i3.  */
4112       if (!modified_between_p (SET_SRC (set1), i2, i3)
4113 	  && !(REG_P (SET_DEST (set1))
4114 	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4115 	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
4116 	       && find_reg_note (i2, REG_DEAD,
4117 				 SUBREG_REG (SET_DEST (set1))))
4118 	  && !modified_between_p (SET_DEST (set1), i2, i3)
4119 	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4120 	  /* If I3 is a jump, ensure that set0 is a jump so that
4121 	     we do not create invalid RTL.  */
4122 	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4123 	 )
4124 	{
4125 	  newi2pat = set1;
4126 	  newpat = set0;
4127 	}
4128       else if (!modified_between_p (SET_SRC (set0), i2, i3)
4129 	       && !(REG_P (SET_DEST (set0))
4130 		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4131 	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
4132 		    && find_reg_note (i2, REG_DEAD,
4133 				      SUBREG_REG (SET_DEST (set0))))
4134 	       && !modified_between_p (SET_DEST (set0), i2, i3)
4135 	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4136 	       /* If I3 is a jump, ensure that set1 is a jump so that
4137 		  we do not create invalid RTL.  */
4138 	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4139 	      )
4140 	{
4141 	  newi2pat = set0;
4142 	  newpat = set1;
4143 	}
4144       else
4145 	{
4146 	  undo_all ();
4147 	  return 0;
4148 	}
4149 
4150       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4151 
4152       if (i2_code_number >= 0)
4153 	{
4154 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
4155 	     Make sure NEWPAT does not depend on the clobbered regs.  */
4156 	  if (GET_CODE (newi2pat) == PARALLEL)
4157 	    {
4158 	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4159 		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4160 		  {
4161 		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4162 		    if (reg_overlap_mentioned_p (reg, newpat))
4163 		      {
4164 			undo_all ();
4165 			return 0;
4166 		      }
4167 		  }
4168 	    }
4169 
4170 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4171 
4172 	  if (insn_code_number >= 0)
4173 	    split_i2i3 = 1;
4174 	}
4175     }
4176 
4177   /* If it still isn't recognized, fail and change things back the way they
4178      were.  */
4179   if ((insn_code_number < 0
4180        /* Is the result a reasonable ASM_OPERANDS?  */
4181        && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4182     {
4183       undo_all ();
4184       return 0;
4185     }
4186 
4187   /* If we had to change another insn, make sure it is valid also.  */
4188   if (undobuf.other_insn)
4189     {
4190       CLEAR_HARD_REG_SET (newpat_used_regs);
4191 
4192       other_pat = PATTERN (undobuf.other_insn);
4193       other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4194 					     &new_other_notes);
4195 
4196       if (other_code_number < 0 && ! check_asm_operands (other_pat))
4197 	{
4198 	  undo_all ();
4199 	  return 0;
4200 	}
4201     }
4202 
4203   /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4204      they are adjacent to each other or not.  */
4205   if (HAVE_cc0)
4206     {
4207       rtx_insn *p = prev_nonnote_insn (i3);
4208       if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4209 	  && sets_cc0_p (newi2pat))
4210 	{
4211 	  undo_all ();
4212 	  return 0;
4213 	}
4214     }
4215 
4216   /* Only allow this combination if insn_cost reports that the
4217      replacement instructions are cheaper than the originals.  */
4218   if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4219     {
4220       undo_all ();
4221       return 0;
4222     }
4223 
4224   if (MAY_HAVE_DEBUG_BIND_INSNS)
4225     {
4226       struct undo *undo;
4227 
4228       for (undo = undobuf.undos; undo; undo = undo->next)
4229 	if (undo->kind == UNDO_MODE)
4230 	  {
4231 	    rtx reg = *undo->where.r;
4232 	    machine_mode new_mode = GET_MODE (reg);
4233 	    machine_mode old_mode = undo->old_contents.m;
4234 
4235 	    /* Temporarily revert mode back.  */
4236 	    adjust_reg_mode (reg, old_mode);
4237 
4238 	    if (reg == i2dest && i2scratch)
4239 	      {
4240 		/* If we used i2dest as a scratch register with a
4241 		   different mode, substitute it for the original
4242 		   i2src while its original mode is temporarily
4243 		   restored, and then clear i2scratch so that we don't
4244 		   do it again later.  */
4245 		propagate_for_debug (i2, last_combined_insn, reg, i2src,
4246 				     this_basic_block);
4247 		i2scratch = false;
4248 		/* Put back the new mode.  */
4249 		adjust_reg_mode (reg, new_mode);
4250 	      }
4251 	    else
4252 	      {
4253 		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4254 		rtx_insn *first, *last;
4255 
4256 		if (reg == i2dest)
4257 		  {
4258 		    first = i2;
4259 		    last = last_combined_insn;
4260 		  }
4261 		else
4262 		  {
4263 		    first = i3;
4264 		    last = undobuf.other_insn;
4265 		    gcc_assert (last);
4266 		    if (DF_INSN_LUID (last)
4267 			< DF_INSN_LUID (last_combined_insn))
4268 		      last = last_combined_insn;
4269 		  }
4270 
4271 		/* We're dealing with a reg that changed mode but not
4272 		   meaning, so we want to turn it into a subreg for
4273 		   the new mode.  However, because of REG sharing and
4274 		   because its mode had already changed, we have to do
4275 		   it in two steps.  First, replace any debug uses of
4276 		   reg, with its original mode temporarily restored,
4277 		   with this copy we have created; then, replace the
4278 		   copy with the SUBREG of the original shared reg,
4279 		   once again changed to the new mode.  */
4280 		propagate_for_debug (first, last, reg, tempreg,
4281 				     this_basic_block);
4282 		adjust_reg_mode (reg, new_mode);
4283 		propagate_for_debug (first, last, tempreg,
4284 				     lowpart_subreg (old_mode, reg, new_mode),
4285 				     this_basic_block);
4286 	      }
4287 	  }
4288     }
4289 
4290   /* If we will be able to accept this, we have made a
4291      change to the destination of I3.  This requires us to
4292      do a few adjustments.  */
4293 
4294   if (changed_i3_dest)
4295     {
4296       PATTERN (i3) = newpat;
4297       adjust_for_new_dest (i3);
4298     }
4299 
4300   /* We now know that we can do this combination.  Merge the insns and
4301      update the status of registers and LOG_LINKS.  */
4302 
4303   if (undobuf.other_insn)
4304     {
4305       rtx note, next;
4306 
4307       PATTERN (undobuf.other_insn) = other_pat;
4308 
4309       /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4310 	 ensure that they are still valid.  Then add any non-duplicate
4311 	 notes added by recog_for_combine.  */
4312       for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4313 	{
4314 	  next = XEXP (note, 1);
4315 
4316 	  if ((REG_NOTE_KIND (note) == REG_DEAD
4317 	       && !reg_referenced_p (XEXP (note, 0),
4318 				     PATTERN (undobuf.other_insn)))
4319 	      || (REG_NOTE_KIND (note) == REG_UNUSED
4320 		 && !reg_set_p (XEXP (note, 0),
4321 				PATTERN (undobuf.other_insn)))
4322 	      /* Simply drop REG_EQUAL and REG_EQUIV notes since they may no
4323 		 longer be valid for other_insn.  It may be possible to record
4324 		 that the CC register changed and only discard those notes, but
4325 		 in practice it's unnecessary complication and doesn't
4326 		 give any meaningful improvement.
4327 
4328 		 See PR78559.  */
4329 	      || REG_NOTE_KIND (note) == REG_EQUAL
4330 	      || REG_NOTE_KIND (note) == REG_EQUIV)
4331 	    remove_note (undobuf.other_insn, note);
4332 	}
4333 
4334       distribute_notes (new_other_notes, undobuf.other_insn,
4335 			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4336 			NULL_RTX);
4337     }
4338 
4339   if (swap_i2i3)
4340     {
4341       /* I3 now uses what used to be its destination, which is now
4342 	 I2's destination.  This requires us to do a few adjustments.  */
4343       PATTERN (i3) = newpat;
4344       adjust_for_new_dest (i3);
4345     }
4346 
4347   if (swap_i2i3 || split_i2i3)
4348     {
4349       /* We might need a LOG_LINK from I3 to I2.  But then we used to
4350 	 have one, so we still will.
4351 
4352 	 However, some later insn might be using I2's dest and have
4353 	 a LOG_LINK pointing at I3.  We should change it to point at
4354 	 I2 instead.  */
4355 
4356       /* newi2pat is usually a SET here; however, recog_for_combine might
4357 	 have added some clobbers.  */
4358       rtx x = newi2pat;
4359       if (GET_CODE (x) == PARALLEL)
4360 	x = XVECEXP (newi2pat, 0, 0);
4361 
4362       /* It can only be a SET of a REG or of a SUBREG of a REG.  */
4363       unsigned int regno = reg_or_subregno (SET_DEST (x));
4364 
4365       bool done = false;
4366       for (rtx_insn *insn = NEXT_INSN (i3);
4367 	   !done
4368 	   && insn
4369 	   && NONDEBUG_INSN_P (insn)
4370 	   && BLOCK_FOR_INSN (insn) == this_basic_block;
4371 	   insn = NEXT_INSN (insn))
4372 	{
4373 	  struct insn_link *link;
4374 	  FOR_EACH_LOG_LINK (link, insn)
4375 	    if (link->insn == i3 && link->regno == regno)
4376 	      {
4377 		link->insn = i2;
4378 		done = true;
4379 		break;
4380 	      }
4381 	}
4382     }
4383 
4384   {
4385     rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4386     struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4387     rtx midnotes = 0;
4388     int from_luid;
4389     /* Compute which registers we expect to eliminate.  newi2pat may be setting
4390        either i3dest or i2dest, so we must check it.  */
4391     rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4392 		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4393 		   || !i2dest_killed
4394 		   ? 0 : i2dest);
4395     /* For i1, we need to compute both local elimination and global
4396        elimination information with respect to newi2pat because i1dest
4397        may be the same as i3dest, in which case newi2pat may be setting
4398        i1dest.  Global information is used when distributing REG_DEAD
4399        note for i2 and i3, in which case it does matter if newi2pat sets
4400        i1dest or not.
4401 
4402        Local information is used when distributing REG_DEAD note for i1,
4403        in which case it doesn't matter if newi2pat sets i1dest or not.
4404        See PR62151, if we have four insns combination:
4405 	   i0: r0 <- i0src
4406 	   i1: r1 <- i1src (using r0)
4407 		     REG_DEAD (r0)
4408 	   i2: r0 <- i2src (using r1)
4409 	   i3: r3 <- i3src (using r0)
4410 	   ix: using r0
4411        From i1's point of view, r0 is eliminated, no matter if it is set
4412        by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
4413        should be discarded.
4414 
4415        Note local information only affects cases in forms like "I1->I2->I3",
4416        "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
4417        "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4418        i0dest anyway.  */
4419     rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4420 			 || !i1dest_killed
4421 			 ? 0 : i1dest);
4422     rtx elim_i1 = (local_elim_i1 == 0
4423 		   || (newi2pat && reg_set_p (i1dest, newi2pat))
4424 		   ? 0 : i1dest);
4425     /* Same case as i1.  */
4426     rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4427 			 ? 0 : i0dest);
4428     rtx elim_i0 = (local_elim_i0 == 0
4429 		   || (newi2pat && reg_set_p (i0dest, newi2pat))
4430 		   ? 0 : i0dest);
4431 
4432     /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4433        clear them.  */
4434     i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4435     i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4436     if (i1)
4437       i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4438     if (i0)
4439       i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4440 
4441     /* Ensure that we do not have something that should not be shared but
4442        occurs multiple times in the new insns.  Check this by first
4443        resetting all the `used' flags and then copying anything that is shared.  */
4444 
4445     reset_used_flags (i3notes);
4446     reset_used_flags (i2notes);
4447     reset_used_flags (i1notes);
4448     reset_used_flags (i0notes);
4449     reset_used_flags (newpat);
4450     reset_used_flags (newi2pat);
4451     if (undobuf.other_insn)
4452       reset_used_flags (PATTERN (undobuf.other_insn));
4453 
4454     i3notes = copy_rtx_if_shared (i3notes);
4455     i2notes = copy_rtx_if_shared (i2notes);
4456     i1notes = copy_rtx_if_shared (i1notes);
4457     i0notes = copy_rtx_if_shared (i0notes);
4458     newpat = copy_rtx_if_shared (newpat);
4459     newi2pat = copy_rtx_if_shared (newi2pat);
4460     if (undobuf.other_insn)
4461       reset_used_flags (PATTERN (undobuf.other_insn));
4462 
4463     INSN_CODE (i3) = insn_code_number;
4464     PATTERN (i3) = newpat;
4465 
4466     if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4467       {
4468 	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4469 	     link = XEXP (link, 1))
4470 	  {
4471 	    if (substed_i2)
4472 	      {
4473 		/* I2SRC must still be meaningful at this point.  Some
4474 		   splitting operations can invalidate I2SRC, but those
4475 		   operations do not apply to calls.  */
4476 		gcc_assert (i2src);
4477 		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4478 						       i2dest, i2src);
4479 	      }
4480 	    if (substed_i1)
4481 	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4482 						     i1dest, i1src);
4483 	    if (substed_i0)
4484 	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4485 						     i0dest, i0src);
4486 	  }
4487       }
4488 
4489     if (undobuf.other_insn)
4490       INSN_CODE (undobuf.other_insn) = other_code_number;
4491 
4492     /* We had one special case above where I2 had more than one set and
4493        we replaced a destination of one of those sets with the destination
4494        of I3.  In that case, we have to update LOG_LINKS of insns later
4495        in this basic block.  Note that this (expensive) case is rare.
4496 
4497        Also, in this case, we must pretend that all REG_NOTEs for I2
4498        actually came from I3, so that REG_UNUSED notes from I2 will be
4499        properly handled.  */
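    /* Concretely, the loop below walks the insns that follow I2 in this
       basic block and redirects any LOG_LINKS entry that still points at I2
       for one of those other destinations so that it points at I3 instead.  */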
4500 
4501     if (i3_subst_into_i2)
4502       {
4503 	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4504 	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4505 	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4506 	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4507 	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4508 	      && ! find_reg_note (i2, REG_UNUSED,
4509 				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4510 	    for (temp_insn = NEXT_INSN (i2);
4511 		 temp_insn
4512 		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4513 		     || BB_HEAD (this_basic_block) != temp_insn);
4514 		 temp_insn = NEXT_INSN (temp_insn))
4515 	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4516 		FOR_EACH_LOG_LINK (link, temp_insn)
4517 		  if (link->insn == i2)
4518 		    link->insn = i3;
4519 
4520 	if (i3notes)
4521 	  {
4522 	    rtx link = i3notes;
4523 	    while (XEXP (link, 1))
4524 	      link = XEXP (link, 1);
4525 	    XEXP (link, 1) = i2notes;
4526 	  }
4527 	else
4528 	  i3notes = i2notes;
4529 	i2notes = 0;
4530       }
4531 
4532     LOG_LINKS (i3) = NULL;
4533     REG_NOTES (i3) = 0;
4534     LOG_LINKS (i2) = NULL;
4535     REG_NOTES (i2) = 0;
4536 
4537     if (newi2pat)
4538       {
4539 	if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4540 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4541 			       this_basic_block);
4542 	INSN_CODE (i2) = i2_code_number;
4543 	PATTERN (i2) = newi2pat;
4544       }
4545     else
4546       {
4547 	if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4548 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4549 			       this_basic_block);
4550 	SET_INSN_DELETED (i2);
4551       }
4552 
4553     if (i1)
4554       {
4555 	LOG_LINKS (i1) = NULL;
4556 	REG_NOTES (i1) = 0;
4557 	if (MAY_HAVE_DEBUG_BIND_INSNS)
4558 	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4559 			       this_basic_block);
4560 	SET_INSN_DELETED (i1);
4561       }
4562 
4563     if (i0)
4564       {
4565 	LOG_LINKS (i0) = NULL;
4566 	REG_NOTES (i0) = 0;
4567 	if (MAY_HAVE_DEBUG_BIND_INSNS)
4568 	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4569 			       this_basic_block);
4570 	SET_INSN_DELETED (i0);
4571       }
4572 
4573     /* Get death notes for everything that is now used in either I3 or
4574        I2 and used to die in a previous insn.  If we built two new
4575        patterns, move from I1 to I2 then I2 to I3 so that we get the
4576        proper movement on registers that I2 modifies.  */
4577 
4578     if (i0)
4579       from_luid = DF_INSN_LUID (i0);
4580     else if (i1)
4581       from_luid = DF_INSN_LUID (i1);
4582     else
4583       from_luid = DF_INSN_LUID (i2);
4584     if (newi2pat)
4585       move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4586     move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4587 
4588     /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
4589     if (i3notes)
4590       distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4591 			elim_i2, elim_i1, elim_i0);
4592     if (i2notes)
4593       distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4594 			elim_i2, elim_i1, elim_i0);
4595     if (i1notes)
4596       distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4597 			elim_i2, local_elim_i1, local_elim_i0);
4598     if (i0notes)
4599       distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4600 			elim_i2, elim_i1, local_elim_i0);
4601     if (midnotes)
4602       distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4603 			elim_i2, elim_i1, elim_i0);
4604 
4605     /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
4606        know these are REG_UNUSED and want them to go to the desired insn,
4607        so we always pass it as i3.  */
4608 
4609     if (newi2pat && new_i2_notes)
4610       distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4611 			NULL_RTX);
4612 
4613     if (new_i3_notes)
4614       distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4615 			NULL_RTX);
4616 
4617     /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
4618        put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
4619        I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
4620        in that case, it might delete I2.  Similarly for I2 and I1.
4621        Show an additional death due to the REG_DEAD note we make here.  If
4622        we discard it in distribute_notes, we will decrement it again.  */
4623 
4624     if (i3dest_killed)
4625       {
4626 	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4627 	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4628 	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4629 			    elim_i1, elim_i0);
4630 	else
4631 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4632 			    elim_i2, elim_i1, elim_i0);
4633       }
4634 
4635     if (i2dest_in_i2src)
4636       {
4637 	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4638 	if (newi2pat && reg_set_p (i2dest, newi2pat))
4639 	  distribute_notes (new_note,  NULL, i2, NULL, NULL_RTX,
4640 			    NULL_RTX, NULL_RTX);
4641 	else
4642 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4643 			    NULL_RTX, NULL_RTX, NULL_RTX);
4644       }
4645 
4646     if (i1dest_in_i1src)
4647       {
4648 	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4649 	if (newi2pat && reg_set_p (i1dest, newi2pat))
4650 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4651 			    NULL_RTX, NULL_RTX);
4652 	else
4653 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4654 			    NULL_RTX, NULL_RTX, NULL_RTX);
4655       }
4656 
4657     if (i0dest_in_i0src)
4658       {
4659 	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4660 	if (newi2pat && reg_set_p (i0dest, newi2pat))
4661 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4662 			    NULL_RTX, NULL_RTX);
4663 	else
4664 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4665 			    NULL_RTX, NULL_RTX, NULL_RTX);
4666       }
4667 
4668     distribute_links (i3links);
4669     distribute_links (i2links);
4670     distribute_links (i1links);
4671     distribute_links (i0links);
4672 
4673     if (REG_P (i2dest))
4674       {
4675 	struct insn_link *link;
4676 	rtx_insn *i2_insn = 0;
4677 	rtx i2_val = 0, set;
4678 
4679 	/* The insn that used to set this register doesn't exist, and
4680 	   this life of the register may not exist either.  See if one of
4681 	   I3's links points to an insn that sets I2DEST.  If it does,
4682 	   that is now the last known value for I2DEST.  If we don't update
4683 	   this and I2 set the register to a value that depended on its old
4684 	   contents, we will get confused.  If this insn is used, things
4685 	   will be set correctly in combine_instructions.  */
4686 	FOR_EACH_LOG_LINK (link, i3)
4687 	  if ((set = single_set (link->insn)) != 0
4688 	      && rtx_equal_p (i2dest, SET_DEST (set)))
4689 	    i2_insn = link->insn, i2_val = SET_SRC (set);
4690 
4691 	record_value_for_reg (i2dest, i2_insn, i2_val);
4692 
4693 	/* If the reg formerly set in I2 died only once and that was in I3,
4694 	   zero its use count so it won't make `reload' do any work.  */
4695 	if (! added_sets_2
4696 	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4697 	    && ! i2dest_in_i2src
4698 	    && REGNO (i2dest) < reg_n_sets_max)
4699 	  INC_REG_N_SETS (REGNO (i2dest), -1);
4700       }
4701 
4702     if (i1 && REG_P (i1dest))
4703       {
4704 	struct insn_link *link;
4705 	rtx_insn *i1_insn = 0;
4706 	rtx i1_val = 0, set;
4707 
4708 	FOR_EACH_LOG_LINK (link, i3)
4709 	  if ((set = single_set (link->insn)) != 0
4710 	      && rtx_equal_p (i1dest, SET_DEST (set)))
4711 	    i1_insn = link->insn, i1_val = SET_SRC (set);
4712 
4713 	record_value_for_reg (i1dest, i1_insn, i1_val);
4714 
4715 	if (! added_sets_1
4716 	    && ! i1dest_in_i1src
4717 	    && REGNO (i1dest) < reg_n_sets_max)
4718 	  INC_REG_N_SETS (REGNO (i1dest), -1);
4719       }
4720 
4721     if (i0 && REG_P (i0dest))
4722       {
4723 	struct insn_link *link;
4724 	rtx_insn *i0_insn = 0;
4725 	rtx i0_val = 0, set;
4726 
4727 	FOR_EACH_LOG_LINK (link, i3)
4728 	  if ((set = single_set (link->insn)) != 0
4729 	      && rtx_equal_p (i0dest, SET_DEST (set)))
4730 	    i0_insn = link->insn, i0_val = SET_SRC (set);
4731 
4732 	record_value_for_reg (i0dest, i0_insn, i0_val);
4733 
4734 	if (! added_sets_0
4735 	    && ! i0dest_in_i0src
4736 	    && REGNO (i0dest) < reg_n_sets_max)
4737 	  INC_REG_N_SETS (REGNO (i0dest), -1);
4738       }
4739 
4740     /* Update reg_stat[].nonzero_bits et al for any changes that may have
4741        been made to this insn.  The order is important, because newi2pat
4742        can affect nonzero_bits of newpat.  */
4743     if (newi2pat)
4744       note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4745     note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4746   }
4747 
4748   if (undobuf.other_insn != NULL_RTX)
4749     {
4750       if (dump_file)
4751 	{
4752 	  fprintf (dump_file, "modifying other_insn ");
4753 	  dump_insn_slim (dump_file, undobuf.other_insn);
4754 	}
4755       df_insn_rescan (undobuf.other_insn);
4756     }
4757 
4758   if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4759     {
4760       if (dump_file)
4761 	{
4762 	  fprintf (dump_file, "modifying insn i0 ");
4763 	  dump_insn_slim (dump_file, i0);
4764 	}
4765       df_insn_rescan (i0);
4766     }
4767 
4768   if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4769     {
4770       if (dump_file)
4771 	{
4772 	  fprintf (dump_file, "modifying insn i1 ");
4773 	  dump_insn_slim (dump_file, i1);
4774 	}
4775       df_insn_rescan (i1);
4776     }
4777 
4778   if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4779     {
4780       if (dump_file)
4781 	{
4782 	  fprintf (dump_file, "modifying insn i2 ");
4783 	  dump_insn_slim (dump_file, i2);
4784 	}
4785       df_insn_rescan (i2);
4786     }
4787 
4788   if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4789     {
4790       if (dump_file)
4791 	{
4792 	  fprintf (dump_file, "modifying insn i3 ");
4793 	  dump_insn_slim (dump_file, i3);
4794 	}
4795       df_insn_rescan (i3);
4796     }
4797 
4798   /* Set new_direct_jump_p if a new return or simple jump instruction
4799      has been created.  Adjust the CFG accordingly.  */
4800   if (returnjump_p (i3) || any_uncondjump_p (i3))
4801     {
4802       *new_direct_jump_p = 1;
4803       mark_jump_label (PATTERN (i3), i3, 0);
4804       update_cfg_for_uncondjump (i3);
4805     }
4806 
4807   if (undobuf.other_insn != NULL_RTX
4808       && (returnjump_p (undobuf.other_insn)
4809 	  || any_uncondjump_p (undobuf.other_insn)))
4810     {
4811       *new_direct_jump_p = 1;
4812       update_cfg_for_uncondjump (undobuf.other_insn);
4813     }
4814 
4815   if (GET_CODE (PATTERN (i3)) == TRAP_IF
4816       && XEXP (PATTERN (i3), 0) == const1_rtx)
4817     {
4818       basic_block bb = BLOCK_FOR_INSN (i3);
4819       gcc_assert (bb);
4820       remove_edge (split_block (bb, i3));
4821       emit_barrier_after_bb (bb);
4822       *new_direct_jump_p = 1;
4823     }
4824 
4825   if (undobuf.other_insn
4826       && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4827       && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4828     {
4829       basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4830       gcc_assert (bb);
4831       remove_edge (split_block (bb, undobuf.other_insn));
4832       emit_barrier_after_bb (bb);
4833       *new_direct_jump_p = 1;
4834     }
4835 
4836   /* A noop might also need the CFG cleaned up, if it comes from the
4837      simplification of a jump.  */
4838   if (JUMP_P (i3)
4839       && GET_CODE (newpat) == SET
4840       && SET_SRC (newpat) == pc_rtx
4841       && SET_DEST (newpat) == pc_rtx)
4842     {
4843       *new_direct_jump_p = 1;
4844       update_cfg_for_uncondjump (i3);
4845     }
4846 
4847   if (undobuf.other_insn != NULL_RTX
4848       && JUMP_P (undobuf.other_insn)
4849       && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4850       && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4851       && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4852     {
4853       *new_direct_jump_p = 1;
4854       update_cfg_for_uncondjump (undobuf.other_insn);
4855     }
4856 
4857   combine_successes++;
4858   undo_commit ();
4859 
4860   rtx_insn *ret = newi2pat ? i2 : i3;
4861   if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4862     ret = added_links_insn;
4863   if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4864     ret = added_notes_insn;
4865 
4866   return ret;
4867 }
4868 
4869 /* Get a marker for undoing to the current state.  */
4870 
4871 static void *
4872 get_undo_marker (void)
4873 {
4874   return undobuf.undos;
4875 }
4876 
4877 /* Undo the modifications up to the marker.  */
4878 
4879 static void
4880 undo_to_marker (void *marker)
4881 {
4882   struct undo *undo, *next;
4883 
4884   for (undo = undobuf.undos; undo != marker; undo = next)
4885     {
4886       gcc_assert (undo);
4887 
4888       next = undo->next;
4889       switch (undo->kind)
4890 	{
4891 	case UNDO_RTX:
4892 	  *undo->where.r = undo->old_contents.r;
4893 	  break;
4894 	case UNDO_INT:
4895 	  *undo->where.i = undo->old_contents.i;
4896 	  break;
4897 	case UNDO_MODE:
4898 	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4899 	  break;
4900 	case UNDO_LINKS:
4901 	  *undo->where.l = undo->old_contents.l;
4902 	  break;
4903 	default:
4904 	  gcc_unreachable ();
4905 	}
4906 
4907       undo->next = undobuf.frees;
4908       undobuf.frees = undo;
4909     }
4910 
4911   undobuf.undos = (struct undo *) marker;
4912 }
4913 
4914 /* Undo all the modifications recorded in undobuf.  */
4915 
4916 static void
4917 undo_all (void)
4918 {
4919   undo_to_marker (0);
4920 }
4921 
4922 /* We've committed to accepting the changes we made.  Move all
4923    of the undos to the free list.  */
4924 
4925 static void
4926 undo_commit (void)
4927 {
4928   struct undo *undo, *next;
4929 
4930   for (undo = undobuf.undos; undo; undo = next)
4931     {
4932       next = undo->next;
4933       undo->next = undobuf.frees;
4934       undobuf.frees = undo;
4935     }
4936   undobuf.undos = 0;
4937 }
4938 
4939 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4940    where we have an arithmetic expression and return that point.  LOC will
4941    be inside INSN.
4942 
4943    try_combine will call this function to see if an insn can be split into
4944    two insns.  */
4945 
4946 static rtx *
4947 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4948 {
4949   rtx x = *loc;
4950   enum rtx_code code = GET_CODE (x);
4951   rtx *split;
4952   unsigned HOST_WIDE_INT len = 0;
4953   HOST_WIDE_INT pos = 0;
4954   int unsignedp = 0;
4955   rtx inner = NULL_RTX;
4956   scalar_int_mode mode, inner_mode;
4957 
4958   /* First special-case some codes.  */
4959   switch (code)
4960     {
4961     case SUBREG:
4962 #ifdef INSN_SCHEDULING
4963       /* If we are making a paradoxical SUBREG invalid, it becomes a split
4964 	 point.  */
4965       if (MEM_P (SUBREG_REG (x)))
4966 	return loc;
4967 #endif
4968       return find_split_point (&SUBREG_REG (x), insn, false);
4969 
4970     case MEM:
4971       /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4972 	 using LO_SUM and HIGH.  */
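      /* For example, (mem (symbol_ref "foo")) becomes
	 (mem (lo_sum (high (symbol_ref "foo")) (symbol_ref "foo")))
	 and the HIGH subexpression is returned as the split point, so it
	 can be computed by a separate insn.  */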
4973       if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4974 			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4975 	{
4976 	  machine_mode address_mode = get_address_mode (x);
4977 
4978 	  SUBST (XEXP (x, 0),
4979 		 gen_rtx_LO_SUM (address_mode,
4980 				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4981 				 XEXP (x, 0)));
4982 	  return &XEXP (XEXP (x, 0), 0);
4983 	}
4984 
4985       /* If we have a PLUS whose second operand is a constant and the
4986 	 address is not valid, perhaps we can split it up using
4987 	 the machine-specific way to split large constants.  We use
4988 	 the first pseudo-reg (one of the virtual regs) as a placeholder;
4989 	 it will not remain in the result.  */
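      /* For instance, on a target whose add-immediate range is limited, the
	 machine description might split (plus (reg) (const_int 0x12345))
	 into one insn adding the high part of the constant and another
	 adding the low part; the code below then recombines those two
	 sources and places the split point between them.  This is only
	 illustrative; the actual split comes from combine_split_insns.  */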
4990       if (GET_CODE (XEXP (x, 0)) == PLUS
4991 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4992 	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4993 					    MEM_ADDR_SPACE (x)))
4994 	{
4995 	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4996 	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4997 					       subst_insn);
4998 
4999 	  /* This should have produced two insns, each of which sets our
5000 	     placeholder.  If the source of the second is a valid address,
5001 	     we can put both sources together and make a split point
5002 	     in the middle.  */
5003 
5004 	  if (seq
5005 	      && NEXT_INSN (seq) != NULL_RTX
5006 	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
5007 	      && NONJUMP_INSN_P (seq)
5008 	      && GET_CODE (PATTERN (seq)) == SET
5009 	      && SET_DEST (PATTERN (seq)) == reg
5010 	      && ! reg_mentioned_p (reg,
5011 				    SET_SRC (PATTERN (seq)))
5012 	      && NONJUMP_INSN_P (NEXT_INSN (seq))
5013 	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
5014 	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
5015 	      && memory_address_addr_space_p
5016 		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
5017 		    MEM_ADDR_SPACE (x)))
5018 	    {
5019 	      rtx src1 = SET_SRC (PATTERN (seq));
5020 	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
5021 
5022 	      /* Replace the placeholder in SRC2 with SRC1.  If we can
5023 		 find where in SRC2 it was placed, that can become our
5024 		 split point and we can replace this address with SRC2.
5025 		 Just try two obvious places.  */
5026 
5027 	      src2 = replace_rtx (src2, reg, src1);
5028 	      split = 0;
5029 	      if (XEXP (src2, 0) == src1)
5030 		split = &XEXP (src2, 0);
5031 	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
5032 		       && XEXP (XEXP (src2, 0), 0) == src1)
5033 		split = &XEXP (XEXP (src2, 0), 0);
5034 
5035 	      if (split)
5036 		{
5037 		  SUBST (XEXP (x, 0), src2);
5038 		  return split;
5039 		}
5040 	    }
5041 
5042 	  /* If that didn't work and we have a nested plus, like:
5043 	     ((REG1 * CONST1) + REG2) + CONST2 and (REG1 + REG2) + CONST2
5044 	     is a valid address, try to split (REG1 * CONST1).  */
5045 	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5046 	      && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5047 	      && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5048 	      && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
5049 		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5050 							 0), 0)))))
5051 	    {
5052 	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
5053 	      XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
5054 	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5055 					       MEM_ADDR_SPACE (x)))
5056 		{
5057 		  XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5058 		  return &XEXP (XEXP (XEXP (x, 0), 0), 0);
5059 		}
5060 	      XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5061 	    }
5062 	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5063 		   && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5064 		   && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5065 		   && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
5066 			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5067 							      0), 1)))))
5068 	    {
5069 	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
5070 	      XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
5071 	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5072 					       MEM_ADDR_SPACE (x)))
5073 		{
5074 		  XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5075 		  return &XEXP (XEXP (XEXP (x, 0), 0), 1);
5076 		}
5077 	      XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5078 	    }
5079 
5080 	  /* If that didn't work, perhaps the first operand is complex and
5081 	     needs to be computed separately, so make a split point there.
5082 	     This will occur on machines that just support REG + CONST
5083 	     and have a constant moved through some previous computation.  */
5084 	  if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
5085 	      && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5086 		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5087 	    return &XEXP (XEXP (x, 0), 0);
5088 	}
5089 
5090       /* If we have a PLUS whose first operand is complex, try computing it
5091          separately by making a split there.  */
5092       if (GET_CODE (XEXP (x, 0)) == PLUS
5093           && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5094 					    MEM_ADDR_SPACE (x))
5095           && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
5096           && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5097                 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5098         return &XEXP (XEXP (x, 0), 0);
5099       break;
5100 
5101     case SET:
5102       /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
5103 	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
5104 	 we need to put the operand into a register.  So split at that
5105 	 point.  */
5106 
5107       if (SET_DEST (x) == cc0_rtx
5108 	  && GET_CODE (SET_SRC (x)) != COMPARE
5109 	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5110 	  && !OBJECT_P (SET_SRC (x))
5111 	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
5112 		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5113 	return &SET_SRC (x);
5114 
5115       /* See if we can split SET_SRC as it stands.  */
5116       split = find_split_point (&SET_SRC (x), insn, true);
5117       if (split && split != &SET_SRC (x))
5118 	return split;
5119 
5120       /* See if we can split SET_DEST as it stands.  */
5121       split = find_split_point (&SET_DEST (x), insn, false);
5122       if (split && split != &SET_DEST (x))
5123 	return split;
5124 
5125       /* See if this is a bitfield assignment with everything constant.  If
5126 	 so, this is an IOR of an AND, so split it into that.  */
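      /* For example, with BITS_BIG_ENDIAN clear,
	   (set (zero_extract (reg X) (const_int 3) (const_int 4)) (const_int 5))
	 becomes
	   (set (reg X) (ior (and (reg X) (const_int -113)) (const_int 80))),
	 clearing the 3-bit field at bit 4 and OR'ing in 5 << 4.  */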
5127       if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5128 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5129 				     &inner_mode)
5130 	  && HWI_COMPUTABLE_MODE_P (inner_mode)
5131 	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
5132 	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
5133 	  && CONST_INT_P (SET_SRC (x))
5134 	  && ((INTVAL (XEXP (SET_DEST (x), 1))
5135 	       + INTVAL (XEXP (SET_DEST (x), 2)))
5136 	      <= GET_MODE_PRECISION (inner_mode))
5137 	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5138 	{
5139 	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5140 	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5141 	  rtx dest = XEXP (SET_DEST (x), 0);
5142 	  unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1;
5143 	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask;
5144 	  rtx or_mask;
5145 
5146 	  if (BITS_BIG_ENDIAN)
5147 	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5148 
5149 	  or_mask = gen_int_mode (src << pos, inner_mode);
5150 	  if (src == mask)
5151 	    SUBST (SET_SRC (x),
5152 		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5153 	  else
5154 	    {
5155 	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5156 	      SUBST (SET_SRC (x),
5157 		     simplify_gen_binary (IOR, inner_mode,
5158 					  simplify_gen_binary (AND, inner_mode,
5159 							       dest, negmask),
5160 					  or_mask));
5161 	    }
5162 
5163 	  SUBST (SET_DEST (x), dest);
5164 
5165 	  split = find_split_point (&SET_SRC (x), insn, true);
5166 	  if (split && split != &SET_SRC (x))
5167 	    return split;
5168 	}
5169 
5170       /* Otherwise, see if this is an operation that we can split into two.
5171 	 If so, try to split that.  */
5172       code = GET_CODE (SET_SRC (x));
5173 
5174       switch (code)
5175 	{
5176 	case AND:
5177 	  /* If we are AND'ing with a large constant that is only a single
5178 	     bit and the result is only being used in a context where we
5179 	     need to know if it is zero or nonzero, replace it with a bit
5180 	     extraction.  This will avoid the large constant, which might
5181 	     have taken more than one insn to make.  If the constant were
5182 	     not a valid argument to the AND but took only one insn to make,
5183 	     this is no worse, but if it took more than one insn, it will
5184 	     be better.  */
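	  /* For example, if (set (reg Y) (and (reg X) (const_int 1048576)))
	     feeds only (ne (reg Y) (const_int 0)), the AND is rewritten as a
	     one-bit ZERO_EXTRACT of bit 20 of X, and we then look for a
	     split point in that.  */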
5185 
5186 	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5187 	      && REG_P (XEXP (SET_SRC (x), 0))
5188 	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5189 	      && REG_P (SET_DEST (x))
5190 	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5191 	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5192 	      && XEXP (*split, 0) == SET_DEST (x)
5193 	      && XEXP (*split, 1) == const0_rtx)
5194 	    {
5195 	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5196 						XEXP (SET_SRC (x), 0),
5197 						pos, NULL_RTX, 1, 1, 0, 0);
5198 	      if (extraction != 0)
5199 		{
5200 		  SUBST (SET_SRC (x), extraction);
5201 		  return find_split_point (loc, insn, false);
5202 		}
5203 	    }
5204 	  break;
5205 
5206 	case NE:
5207 	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
5208 	     is known to be on, this can be converted into a NEG of a shift.  */
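	  /* E.g. if only bit 3 of X can be nonzero, (ne X (const_int 0))
	     becomes (neg (lshiftrt X (const_int 3))), which is -1 when that
	     bit is set and 0 otherwise, matching STORE_FLAG_VALUE == -1.  */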
5209 	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5210 	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5211 	      && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5212 						   GET_MODE (XEXP (SET_SRC (x),
5213 							     0))))) >= 1))
5214 	    {
5215 	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5216 	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
5217 	      SUBST (SET_SRC (x),
5218 		     gen_rtx_NEG (mode,
5219 				  gen_rtx_LSHIFTRT (mode,
5220 						    XEXP (SET_SRC (x), 0),
5221 						    pos_rtx)));
5222 
5223 	      split = find_split_point (&SET_SRC (x), insn, true);
5224 	      if (split && split != &SET_SRC (x))
5225 		return split;
5226 	    }
5227 	  break;
5228 
5229 	case SIGN_EXTEND:
5230 	  inner = XEXP (SET_SRC (x), 0);
5231 
5232 	  /* We can't optimize if either mode is a partial integer
5233 	     mode as we don't know how many bits are significant
5234 	     in those modes.  */
5235 	  if (!is_int_mode (GET_MODE (inner), &inner_mode)
5236 	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5237 	    break;
5238 
5239 	  pos = 0;
5240 	  len = GET_MODE_PRECISION (inner_mode);
5241 	  unsignedp = 0;
5242 	  break;
5243 
5244 	case SIGN_EXTRACT:
5245 	case ZERO_EXTRACT:
5246 	  if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5247 				      &inner_mode)
5248 	      && CONST_INT_P (XEXP (SET_SRC (x), 1))
5249 	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5250 	    {
5251 	      inner = XEXP (SET_SRC (x), 0);
5252 	      len = INTVAL (XEXP (SET_SRC (x), 1));
5253 	      pos = INTVAL (XEXP (SET_SRC (x), 2));
5254 
5255 	      if (BITS_BIG_ENDIAN)
5256 		pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5257 	      unsignedp = (code == ZERO_EXTRACT);
5258 	    }
5259 	  break;
5260 
5261 	default:
5262 	  break;
5263 	}
5264 
5265       if (len
5266 	  && known_subrange_p (pos, len,
5267 			       0, GET_MODE_PRECISION (GET_MODE (inner)))
5268 	  && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5269 	{
5270 	  /* For unsigned, we have a choice of a shift followed by an
5271 	     AND or two shifts.  Use two shifts for field sizes where the
5272 	     constant might be too large.  We assume here that we can
5273 	     always at least get 8-bit constants in an AND insn, which is
5274 	     true for every current RISC.  */
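	  /* E.g. an unsigned 5-bit field at bit POS becomes
	     (and (lshiftrt (lowpart INNER) POS) (const_int 31)); wider or
	     signed fields instead use an ASHIFT followed by an LSHIFTRT or
	     ASHIFTRT that isolates and positions the field.  */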
5275 
5276 	  if (unsignedp && len <= 8)
5277 	    {
5278 	      unsigned HOST_WIDE_INT mask
5279 		= (HOST_WIDE_INT_1U << len) - 1;
5280 	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
5281 	      SUBST (SET_SRC (x),
5282 		     gen_rtx_AND (mode,
5283 				  gen_rtx_LSHIFTRT
5284 				  (mode, gen_lowpart (mode, inner), pos_rtx),
5285 				  gen_int_mode (mask, mode)));
5286 
5287 	      split = find_split_point (&SET_SRC (x), insn, true);
5288 	      if (split && split != &SET_SRC (x))
5289 		return split;
5290 	    }
5291 	  else
5292 	    {
5293 	      int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5294 	      int right_bits = GET_MODE_PRECISION (mode) - len;
5295 	      SUBST (SET_SRC (x),
5296 		     gen_rtx_fmt_ee
5297 		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5298 		      gen_rtx_ASHIFT (mode,
5299 				      gen_lowpart (mode, inner),
5300 				      gen_int_shift_amount (mode, left_bits)),
5301 		      gen_int_shift_amount (mode, right_bits)));
5302 
5303 	      split = find_split_point (&SET_SRC (x), insn, true);
5304 	      if (split && split != &SET_SRC (x))
5305 		return split;
5306 	    }
5307 	}
5308 
5309       /* See if this is a simple operation with a constant as the second
5310 	 operand.  It might be that this constant is out of range and hence
5311 	 could be used as a split point.  */
5312       if (BINARY_P (SET_SRC (x))
5313 	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
5314 	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
5315 	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5316 		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5317 	return &XEXP (SET_SRC (x), 1);
5318 
5319       /* Finally, see if this is a simple operation with its first operand
5320 	 not in a register.  The operation might require this operand in a
5321 	 register, so return it as a split point.  We can always do this
5322 	 because if the first operand were another operation, we would have
5323 	 already found it as a split point.  */
5324       if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5325 	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5326 	return &XEXP (SET_SRC (x), 0);
5327 
5328       return 0;
5329 
5330     case AND:
5331     case IOR:
5332       /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5333 	 it is better to write this as (not (ior A B)) so we can split it.
5334 	 Similarly for IOR.  */
5335       if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5336 	{
5337 	  SUBST (*loc,
5338 		 gen_rtx_NOT (GET_MODE (x),
5339 			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5340 					      GET_MODE (x),
5341 					      XEXP (XEXP (x, 0), 0),
5342 					      XEXP (XEXP (x, 1), 0))));
5343 	  return find_split_point (loc, insn, set_src);
5344 	}
5345 
5346       /* Many RISC machines have a large set of logical insns.  If the
5347 	 second operand is a NOT, put it first so we will try to split the
5348 	 other operand first.  */
5349       if (GET_CODE (XEXP (x, 1)) == NOT)
5350 	{
5351 	  rtx tem = XEXP (x, 0);
5352 	  SUBST (XEXP (x, 0), XEXP (x, 1));
5353 	  SUBST (XEXP (x, 1), tem);
5354 	}
5355       break;
5356 
5357     case PLUS:
5358     case MINUS:
5359       /* Canonicalization can produce (minus A (mult B C)), where C is a
5360 	 constant.  It may be better to try splitting (plus (mult B -C) A)
5361 	 instead if this isn't a multiply by a power of two.  */
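      /* E.g. (minus (reg A) (mult (reg B) (const_int 3))) is rewritten as
	 (plus (mult (reg B) (const_int -3)) (reg A)) before looking for a
	 split point, since 3 is not a power of two.  */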
5362       if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5363 	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5364 	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5365 	{
5366 	  machine_mode mode = GET_MODE (x);
5367 	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5368 	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5369 	  SUBST (*loc, gen_rtx_PLUS (mode,
5370 				     gen_rtx_MULT (mode,
5371 						   XEXP (XEXP (x, 1), 0),
5372 						   gen_int_mode (other_int,
5373 								 mode)),
5374 				     XEXP (x, 0)));
5375 	  return find_split_point (loc, insn, set_src);
5376 	}
5377 
5378       /* Split at a multiply-accumulate instruction.  However if this is
5379          the SET_SRC, we likely do not have such an instruction and it's
5380          worthless to try this split.  */
5381       if (!set_src
5382 	  && (GET_CODE (XEXP (x, 0)) == MULT
5383 	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
5384 		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5385         return loc;
5386 
5387     default:
5388       break;
5389     }
5390 
5391   /* Otherwise, select our actions depending on our rtx class.  */
5392   switch (GET_RTX_CLASS (code))
5393     {
5394     case RTX_BITFIELD_OPS:		/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
5395     case RTX_TERNARY:
5396       split = find_split_point (&XEXP (x, 2), insn, false);
5397       if (split)
5398 	return split;
5399       /* fall through */
5400     case RTX_BIN_ARITH:
5401     case RTX_COMM_ARITH:
5402     case RTX_COMPARE:
5403     case RTX_COMM_COMPARE:
5404       split = find_split_point (&XEXP (x, 1), insn, false);
5405       if (split)
5406 	return split;
5407       /* fall through */
5408     case RTX_UNARY:
5409       /* Some machines have (and (shift ...) ...) insns.  If X is not
5410 	 an AND, but XEXP (X, 0) is, use it as our split point.  */
5411       if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5412 	return &XEXP (x, 0);
5413 
5414       split = find_split_point (&XEXP (x, 0), insn, false);
5415       if (split)
5416 	return split;
5417       return loc;
5418 
5419     default:
5420       /* Otherwise, we don't have a split point.  */
5421       return 0;
5422     }
5423 }
5424 
5425 /* Throughout X, replace FROM with TO, and return the result.
5426    The result is TO if X is FROM;
5427    otherwise the result is X, but its contents may have been modified.
5428    If they were modified, a record was made in undobuf so that
5429    undo_all will (among other things) return X to its original state.
5430 
5431    If the number of changes necessary is too much to record to undo,
5432    the excess changes are not made, so the result is invalid.
5433    The changes already made can still be undone.
5434    undobuf.num_undo is incremented for such changes, so by testing that
5435    the caller can tell whether the result is valid.
5436 
5437    `n_occurrences' is incremented each time FROM is replaced.
5438 
5439    IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5440 
5441    IN_COND is nonzero if we are at the top level of a condition.
5442 
5443    UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
5444    by copying if `n_occurrences' is nonzero.  */
5445 
5446 static rtx
5447 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5448 {
5449   enum rtx_code code = GET_CODE (x);
5450   machine_mode op0_mode = VOIDmode;
5451   const char *fmt;
5452   int len, i;
5453   rtx new_rtx;
5454 
5455 /* Two expressions are equal if they are identical copies of a shared
5456    RTX or if they are both registers with the same register number
5457    and mode.  */
5458 
5459 #define COMBINE_RTX_EQUAL_P(X,Y)			\
5460   ((X) == (Y)						\
5461    || (REG_P (X) && REG_P (Y)	\
5462        && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5463 
5464   /* Do not substitute into clobbers of regs -- this will never result in
5465      valid RTL.  */
5466   if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5467     return x;
5468 
5469   if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5470     {
5471       n_occurrences++;
5472       return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5473     }
5474 
5475   /* If X and FROM are the same register but different modes, they
5476      will not have been seen as equal above.  However, the log links code
5477      will make a LOG_LINKS entry for that case.  If we do nothing, we
5478      will try to rerecognize our original insn and, when it succeeds,
5479      we will delete the feeding insn, which is incorrect.
5480 
5481      So force this insn not to match in this (rare) case.  */
5482   if (! in_dest && code == REG && REG_P (from)
5483       && reg_overlap_mentioned_p (x, from))
5484     return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5485 
5486   /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5487      of which may contain things that can be combined.  */
5488   if (code != MEM && code != LO_SUM && OBJECT_P (x))
5489     return x;
5490 
5491   /* It is possible to have a subexpression appear twice in the insn.
5492      Suppose that FROM is a register that appears within TO.
5493      Then, after that subexpression has been scanned once by `subst',
5494      the second time it is scanned, TO may be found.  If we were
5495      to scan TO here, we would find FROM within it and create a
5496      self-referential rtl structure, which is completely wrong.  */
5497   if (COMBINE_RTX_EQUAL_P (x, to))
5498     return to;
5499 
5500   /* Parallel asm_operands need special attention because all of the
5501      inputs are shared across the arms.  Furthermore, unsharing the
5502      rtl results in recognition failures.  Failure to handle this case
5503      specially can result in circular rtl.
5504 
5505      Solve this by doing a normal pass across the first entry of the
5506      parallel, and only processing the SET_DESTs of the subsequent
5507      entries.  Ug.  */
5508 
5509   if (code == PARALLEL
5510       && GET_CODE (XVECEXP (x, 0, 0)) == SET
5511       && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5512     {
5513       new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5514 
5515       /* If this substitution failed, this whole thing fails.  */
5516       if (GET_CODE (new_rtx) == CLOBBER
5517 	  && XEXP (new_rtx, 0) == const0_rtx)
5518 	return new_rtx;
5519 
5520       SUBST (XVECEXP (x, 0, 0), new_rtx);
5521 
5522       for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5523 	{
5524 	  rtx dest = SET_DEST (XVECEXP (x, 0, i));
5525 
5526 	  if (!REG_P (dest)
5527 	      && GET_CODE (dest) != CC0
5528 	      && GET_CODE (dest) != PC)
5529 	    {
5530 	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5531 
5532 	      /* If this substitution failed, this whole thing fails.  */
5533 	      if (GET_CODE (new_rtx) == CLOBBER
5534 		  && XEXP (new_rtx, 0) == const0_rtx)
5535 		return new_rtx;
5536 
5537 	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5538 	    }
5539 	}
5540     }
5541   else
5542     {
5543       len = GET_RTX_LENGTH (code);
5544       fmt = GET_RTX_FORMAT (code);
5545 
5546       /* We don't need to process a SET_DEST that is a register, CC0,
5547 	 or PC, so set up to skip this common case.  All other cases
5548 	 where we want to suppress replacing something inside a
5549 	 SET_SRC are handled via the IN_DEST operand.  */
5550       if (code == SET
5551 	  && (REG_P (SET_DEST (x))
5552 	      || GET_CODE (SET_DEST (x)) == CC0
5553 	      || GET_CODE (SET_DEST (x)) == PC))
5554 	fmt = "ie";
5555 
5556       /* Trying to simplify the operands of a widening MULT is not likely
5557 	 to create RTL matching a machine insn.  */
5558       if (code == MULT
5559 	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5560 	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5561 	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5562 	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5563 	  && REG_P (XEXP (XEXP (x, 0), 0))
5564 	  && REG_P (XEXP (XEXP (x, 1), 0))
5565 	  && from == to)
5566 	return x;
5567 
5568 
5569       /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5570 	 constant.  */
5571       if (fmt[0] == 'e')
5572 	op0_mode = GET_MODE (XEXP (x, 0));
5573 
5574       for (i = 0; i < len; i++)
5575 	{
5576 	  if (fmt[i] == 'E')
5577 	    {
5578 	      int j;
5579 	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5580 		{
5581 		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5582 		    {
5583 		      new_rtx = (unique_copy && n_occurrences
5584 			     ? copy_rtx (to) : to);
5585 		      n_occurrences++;
5586 		    }
5587 		  else
5588 		    {
5589 		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5590 				       unique_copy);
5591 
5592 		      /* If this substitution failed, this whole thing
5593 			 fails.  */
5594 		      if (GET_CODE (new_rtx) == CLOBBER
5595 			  && XEXP (new_rtx, 0) == const0_rtx)
5596 			return new_rtx;
5597 		    }
5598 
5599 		  SUBST (XVECEXP (x, i, j), new_rtx);
5600 		}
5601 	    }
5602 	  else if (fmt[i] == 'e')
5603 	    {
5604 	      /* If this is a register being set, ignore it.  */
5605 	      new_rtx = XEXP (x, i);
5606 	      if (in_dest
5607 		  && i == 0
5608 		  && (((code == SUBREG || code == ZERO_EXTRACT)
5609 		       && REG_P (new_rtx))
5610 		      || code == STRICT_LOW_PART))
5611 		;
5612 
5613 	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5614 		{
5615 		  /* In general, don't install a subreg involving two
5616 		     modes that are not tieable.  It can worsen register
5617 		     allocation, and can even make invalid reload
5618 		     insns, since the reg inside may need to be copied
5619 		     from in the outside mode, and that may be invalid
5620 		     if it is an fp reg copied in integer mode.
5621 
5622 		     We allow two exceptions to this: It is valid if
5623 		     it is inside another SUBREG and the mode of that
5624 		     SUBREG and the mode of the inside of TO is
5625 		     tieable and it is valid if X is a SET that copies
5626 		     FROM to CC0.  */
5627 
5628 		  if (GET_CODE (to) == SUBREG
5629 		      && !targetm.modes_tieable_p (GET_MODE (to),
5630 						   GET_MODE (SUBREG_REG (to)))
5631 		      && ! (code == SUBREG
5632 			    && (targetm.modes_tieable_p
5633 				(GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5634 		      && (!HAVE_cc0
5635 			  || (! (code == SET
5636 				 && i == 1
5637 				 && XEXP (x, 0) == cc0_rtx))))
5638 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5639 
5640 		  if (code == SUBREG
5641 		      && REG_P (to)
5642 		      && REGNO (to) < FIRST_PSEUDO_REGISTER
5643 		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5644 						SUBREG_BYTE (x),
5645 						GET_MODE (x)) < 0)
5646 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5647 
5648 		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5649 		  n_occurrences++;
5650 		}
5651 	      else
5652 		/* If we are in a SET_DEST, suppress most cases unless we
5653 		   have gone inside a MEM, in which case we want to
5654 		   simplify the address.  We assume here that things that
5655 		   are actually part of the destination have their inner
5656 		   parts in the first expression.  This is true for SUBREG,
5657 		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5658 		   things aside from REG and MEM that should appear in a
5659 		   SET_DEST.  */
5660 		new_rtx = subst (XEXP (x, i), from, to,
5661 			     (((in_dest
5662 				&& (code == SUBREG || code == STRICT_LOW_PART
5663 				    || code == ZERO_EXTRACT))
5664 			       || code == SET)
5665 			      && i == 0),
5666 				 code == IF_THEN_ELSE && i == 0,
5667 				 unique_copy);
5668 
5669 	      /* If we found that we will have to reject this combination,
5670 		 indicate that by returning the CLOBBER ourselves, rather than
5671 		 an expression containing it.  This will speed things up as
5672 		 well as prevent accidents where two CLOBBERs are considered
5673 		 to be equal, thus producing an incorrect simplification.  */
5674 
5675 	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5676 		return new_rtx;
5677 
5678 	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5679 		{
5680 		  machine_mode mode = GET_MODE (x);
5681 
5682 		  x = simplify_subreg (GET_MODE (x), new_rtx,
5683 				       GET_MODE (SUBREG_REG (x)),
5684 				       SUBREG_BYTE (x));
5685 		  if (! x)
5686 		    x = gen_rtx_CLOBBER (mode, const0_rtx);
5687 		}
5688 	      else if (CONST_SCALAR_INT_P (new_rtx)
5689 		       && (GET_CODE (x) == ZERO_EXTEND
5690 			   || GET_CODE (x) == FLOAT
5691 			   || GET_CODE (x) == UNSIGNED_FLOAT))
5692 		{
5693 		  x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5694 						new_rtx,
5695 						GET_MODE (XEXP (x, 0)));
5696 		  if (!x)
5697 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5698 		}
5699 	      else
5700 		SUBST (XEXP (x, i), new_rtx);
5701 	    }
5702 	}
5703     }
5704 
5705   /* Check if we are loading something from the constant pool via float
5706      extension; in this case we would undo the compress_float_constant
5707      optimization and degrade the constant load to an immediate value.  */
5708   if (GET_CODE (x) == FLOAT_EXTEND
5709       && MEM_P (XEXP (x, 0))
5710       && MEM_READONLY_P (XEXP (x, 0)))
5711     {
5712       rtx tmp = avoid_constant_pool_reference (x);
5713       if (x != tmp)
5714         return x;
5715     }
5716 
5717   /* Try to simplify X.  If the simplification changed the code, it is likely
5718      that further simplification will help, so loop, but limit the number
5719      of repetitions that will be performed.  */
5720 
5721   for (i = 0; i < 4; i++)
5722     {
5723       /* If X is sufficiently simple, don't bother trying to do anything
5724 	 with it.  */
5725       if (code != CONST_INT && code != REG && code != CLOBBER)
5726 	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5727 
5728       if (GET_CODE (x) == code)
5729 	break;
5730 
5731       code = GET_CODE (x);
5732 
5733       /* We no longer know the original mode of operand 0 since we
5734 	 have changed the form of X.  */
5735       op0_mode = VOIDmode;
5736     }
5737 
5738   return x;
5739 }
5740 
5741 /* If X is a commutative operation whose operands are not in the canonical
5742    order, use substitutions to swap them.  */
5743 
5744 static void
5745 maybe_swap_commutative_operands (rtx x)
5746 {
5747   if (COMMUTATIVE_ARITH_P (x)
5748       && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5749     {
5750       rtx temp = XEXP (x, 0);
5751       SUBST (XEXP (x, 0), XEXP (x, 1));
5752       SUBST (XEXP (x, 1), temp);
5753     }
5754 }
5755 
5756 /* Simplify X, a piece of RTL.  We just operate on the expression at the
5757    outer level; call `subst' to simplify recursively.  Return the new
5758    expression.
5759 
5760    OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
5761    if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
5762    of a condition.  */
5763 
5764 static rtx
5765 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5766 		      int in_cond)
5767 {
5768   enum rtx_code code = GET_CODE (x);
5769   machine_mode mode = GET_MODE (x);
5770   scalar_int_mode int_mode;
5771   rtx temp;
5772   int i;
5773 
5774   /* If this is a commutative operation, put a constant last and a complex
5775      expression first.  We don't need to do this for comparisons here.  */
5776   maybe_swap_commutative_operands (x);
5777 
5778   /* Try to fold this expression in case we have constants that weren't
5779      present before.  */
5780   temp = 0;
5781   switch (GET_RTX_CLASS (code))
5782     {
5783     case RTX_UNARY:
5784       if (op0_mode == VOIDmode)
5785 	op0_mode = GET_MODE (XEXP (x, 0));
5786       temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5787       break;
5788     case RTX_COMPARE:
5789     case RTX_COMM_COMPARE:
5790       {
5791 	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5792 	if (cmp_mode == VOIDmode)
5793 	  {
5794 	    cmp_mode = GET_MODE (XEXP (x, 1));
5795 	    if (cmp_mode == VOIDmode)
5796 	      cmp_mode = op0_mode;
5797 	  }
5798 	temp = simplify_relational_operation (code, mode, cmp_mode,
5799 					      XEXP (x, 0), XEXP (x, 1));
5800       }
5801       break;
5802     case RTX_COMM_ARITH:
5803     case RTX_BIN_ARITH:
5804       temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5805       break;
5806     case RTX_BITFIELD_OPS:
5807     case RTX_TERNARY:
5808       temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5809 					 XEXP (x, 1), XEXP (x, 2));
5810       break;
5811     default:
5812       break;
5813     }
5814 
5815   if (temp)
5816     {
5817       x = temp;
5818       code = GET_CODE (temp);
5819       op0_mode = VOIDmode;
5820       mode = GET_MODE (temp);
5821     }
5822 
5823   /* If this is a simple operation applied to an IF_THEN_ELSE, try
5824      applying it to the arms of the IF_THEN_ELSE.  This often simplifies
5825      things.  Check for cases where both arms are testing the same
5826      condition.
5827 
5828      Don't do anything if all operands are very simple.  */
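  /* For instance, (plus (if_then_else C A B) (const_int 1)) can become
     (if_then_else C (plus A (const_int 1)) (plus B (const_int 1))), and if
     the two arms then collapse to STORE_FLAG_VALUE and zero, the whole
     expression reduces to a comparison.  */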
5829 
5830   if ((BINARY_P (x)
5831        && ((!OBJECT_P (XEXP (x, 0))
5832 	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5833 		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5834 	   || (!OBJECT_P (XEXP (x, 1))
5835 	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5836 		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5837       || (UNARY_P (x)
5838 	  && (!OBJECT_P (XEXP (x, 0))
5839 	       && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5840 		     && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5841     {
5842       rtx cond, true_rtx, false_rtx;
5843 
5844       cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5845       if (cond != 0
5846 	  /* If everything is a comparison, what we have is highly unlikely
5847 	     to be simpler, so don't use it.  */
5848 	  && ! (COMPARISON_P (x)
5849 		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5850 	  /* Similarly, if we end up with one of the expressions the same
5851 	     as the original, it is certainly not simpler.  */
5852 	  && ! rtx_equal_p (x, true_rtx)
5853 	  && ! rtx_equal_p (x, false_rtx))
5854 	{
5855 	  rtx cop1 = const0_rtx;
5856 	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5857 
5858 	  if (cond_code == NE && COMPARISON_P (cond))
5859 	    return x;
5860 
5861 	  /* Simplify the alternative arms; this may collapse the true and
5862 	     false arms to store-flag values.  Be careful to use copy_rtx
5863 	     here since true_rtx or false_rtx might share RTL with x as a
5864 	     result of the if_then_else_cond call above.  */
5865 	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5866 	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5867 
5868 	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
5869 	     is unlikely to be simpler.  */
5870 	  if (general_operand (true_rtx, VOIDmode)
5871 	      && general_operand (false_rtx, VOIDmode))
5872 	    {
5873 	      enum rtx_code reversed;
5874 
5875 	      /* Restarting if we generate a store-flag expression will cause
5876 		 us to loop.  Just drop through in this case.  */
5877 
5878 	      /* If the result values are STORE_FLAG_VALUE and zero, we can
5879 		 just make the comparison operation.  */
5880 	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5881 		x = simplify_gen_relational (cond_code, mode, VOIDmode,
5882 					     cond, cop1);
5883 	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5884 		       && ((reversed = reversed_comparison_code_parts
5885 					(cond_code, cond, cop1, NULL))
5886 			   != UNKNOWN))
5887 		x = simplify_gen_relational (reversed, mode, VOIDmode,
5888 					     cond, cop1);
5889 
5890 	      /* Likewise, we can make the negate of a comparison operation
5891 		 if the result values are - STORE_FLAG_VALUE and zero.  */
5892 	      else if (CONST_INT_P (true_rtx)
5893 		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5894 		       && false_rtx == const0_rtx)
5895 		x = simplify_gen_unary (NEG, mode,
5896 					simplify_gen_relational (cond_code,
5897 								 mode, VOIDmode,
5898 								 cond, cop1),
5899 					mode);
5900 	      else if (CONST_INT_P (false_rtx)
5901 		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5902 		       && true_rtx == const0_rtx
5903 		       && ((reversed = reversed_comparison_code_parts
5904 					(cond_code, cond, cop1, NULL))
5905 			   != UNKNOWN))
5906 		x = simplify_gen_unary (NEG, mode,
5907 					simplify_gen_relational (reversed,
5908 								 mode, VOIDmode,
5909 								 cond, cop1),
5910 					mode);
5911 	      else
5912 		return gen_rtx_IF_THEN_ELSE (mode,
5913 					     simplify_gen_relational (cond_code,
5914 								      mode,
5915 								      VOIDmode,
5916 								      cond,
5917 								      cop1),
5918 					     true_rtx, false_rtx);
5919 
5920 	      code = GET_CODE (x);
5921 	      op0_mode = VOIDmode;
5922 	    }
5923 	}
5924     }
5925 
5926   /* First see if we can apply the inverse distributive law.  */
5927   if (code == PLUS || code == MINUS
5928       || code == AND || code == IOR || code == XOR)
5929     {
5930       x = apply_distributive_law (x);
5931       code = GET_CODE (x);
5932       op0_mode = VOIDmode;
5933     }
5934 
5935   /* If CODE is an associative operation not otherwise handled, see if we
5936      can associate some operands.  This can win if they are constants or
5937      if they are logically related (i.e. (a & b) & a).  */
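  /* E.g. (plus (plus X (const_int 3)) (const_int 4)) reassociates the two
     constants and simplifies to (plus X (const_int 7)).  */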
5938   if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5939        || code == AND || code == IOR || code == XOR
5940        || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5941       && ((INTEGRAL_MODE_P (mode) && code != DIV)
5942 	  || (flag_associative_math && FLOAT_MODE_P (mode))))
5943     {
5944       if (GET_CODE (XEXP (x, 0)) == code)
5945 	{
5946 	  rtx other = XEXP (XEXP (x, 0), 0);
5947 	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5948 	  rtx inner_op1 = XEXP (x, 1);
5949 	  rtx inner;
5950 
5951 	  /* Make sure we pass the constant operand, if any, as the second
5952 	     one if this is a commutative operation.  */
5953 	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5954 	    std::swap (inner_op0, inner_op1);
5955 	  inner = simplify_binary_operation (code == MINUS ? PLUS
5956 					     : code == DIV ? MULT
5957 					     : code,
5958 					     mode, inner_op0, inner_op1);
5959 
5960 	  /* For commutative operations, try the other pair if that one
5961 	     didn't simplify.  */
5962 	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5963 	    {
5964 	      other = XEXP (XEXP (x, 0), 1);
5965 	      inner = simplify_binary_operation (code, mode,
5966 						 XEXP (XEXP (x, 0), 0),
5967 						 XEXP (x, 1));
5968 	    }
5969 
5970 	  if (inner)
5971 	    return simplify_gen_binary (code, mode, other, inner);
5972 	}
5973     }
5974 
5975   /* A little bit of algebraic simplification here.  */
5976   switch (code)
5977     {
5978     case MEM:
5979       /* Ensure that our address has any ASHIFTs converted to MULT in case
5980 	 address-recognizing predicates are called later.  */
5981       temp = make_compound_operation (XEXP (x, 0), MEM);
5982       SUBST (XEXP (x, 0), temp);
5983       break;
5984 
5985     case SUBREG:
5986       if (op0_mode == VOIDmode)
5987 	op0_mode = GET_MODE (SUBREG_REG (x));
5988 
5989       /* See if this can be moved to simplify_subreg.  */
5990       if (CONSTANT_P (SUBREG_REG (x))
5991 	  && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5992 	     /* Don't call gen_lowpart if the inner mode
5993 		is VOIDmode and we cannot simplify it, as SUBREG without
5994 		inner mode is invalid.  */
5995 	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5996 	      || gen_lowpart_common (mode, SUBREG_REG (x))))
5997 	return gen_lowpart (mode, SUBREG_REG (x));
5998 
5999       if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
6000 	break;
6001       {
6002 	rtx temp;
6003 	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
6004 				SUBREG_BYTE (x));
6005 	if (temp)
6006 	  return temp;
6007 
6008 	/* If op is known to have all lower bits zero, the result is zero.  */
6009 	scalar_int_mode int_mode, int_op0_mode;
6010 	if (!in_dest
6011 	    && is_a <scalar_int_mode> (mode, &int_mode)
6012 	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
6013 	    && (GET_MODE_PRECISION (int_mode)
6014 		< GET_MODE_PRECISION (int_op0_mode))
6015 	    && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
6016 			 SUBREG_BYTE (x))
6017 	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
6018 	    && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
6019 		 & GET_MODE_MASK (int_mode)) == 0)
6020 	    && !side_effects_p (SUBREG_REG (x)))
6021 	  return CONST0_RTX (int_mode);
6022       }
6023 
6024       /* Don't change the mode of the MEM if that would change the meaning
6025 	 of the address.  */
6026       if (MEM_P (SUBREG_REG (x))
6027 	  && (MEM_VOLATILE_P (SUBREG_REG (x))
6028 	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
6029 					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
6030 	return gen_rtx_CLOBBER (mode, const0_rtx);
6031 
6032       /* Note that we cannot do any narrowing for non-constants since
6033 	 we might have been counting on using the fact that some bits were
6034 	 zero.  We now do this in the SET.  */
6035 
6036       break;
6037 
6038     case NEG:
6039       temp = expand_compound_operation (XEXP (x, 0));
6040 
6041       /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
6042 	 replaced by (lshiftrt X C).  This will convert
6043 	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
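      /* For example, in SImode (neg (ashiftrt X 31)) becomes
	 (lshiftrt X 31): the arithmetic shift yields 0 or -1, and
	 negating that gives 0 or 1, the sign bit shifted down logically.  */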
6044 
6045       if (GET_CODE (temp) == ASHIFTRT
6046 	  && CONST_INT_P (XEXP (temp, 1))
6047 	  && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
6048 	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
6049 				     INTVAL (XEXP (temp, 1)));
6050 
6051       /* If X has only a single bit that might be nonzero, say, bit I, convert
6052 	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
6053 	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
6054 	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
6055 	 or a SUBREG of one since we'd be making the expression more
6056 	 complex if it was just a register.  */
6057 
6058       if (!REG_P (temp)
6059 	  && ! (GET_CODE (temp) == SUBREG
6060 		&& REG_P (SUBREG_REG (temp)))
6061 	  && is_a <scalar_int_mode> (mode, &int_mode)
6062 	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
6063 	{
6064 	  rtx temp1 = simplify_shift_const
6065 	    (NULL_RTX, ASHIFTRT, int_mode,
6066 	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
6067 				   GET_MODE_PRECISION (int_mode) - 1 - i),
6068 	     GET_MODE_PRECISION (int_mode) - 1 - i);
6069 
6070 	  /* If all we did was surround TEMP with the two shifts, we
6071 	     haven't improved anything, so don't use it.  Otherwise,
6072 	     we are better off with TEMP1.  */
6073 	  if (GET_CODE (temp1) != ASHIFTRT
6074 	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
6075 	      || XEXP (XEXP (temp1, 0), 0) != temp)
6076 	    return temp1;
6077 	}
6078       break;
6079 
6080     case TRUNCATE:
6081       /* We can't handle truncation to a partial integer mode here
6082 	 because we don't know the real bitsize of the partial
6083 	 integer mode.  */
6084       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
6085 	break;
6086 
6087       if (HWI_COMPUTABLE_MODE_P (mode))
6088 	SUBST (XEXP (x, 0),
6089 	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6090 			      GET_MODE_MASK (mode), 0));
6091 
6092       /* We can truncate a constant value and return it.  */
6093       {
6094 	poly_int64 c;
6095 	if (poly_int_rtx_p (XEXP (x, 0), &c))
6096 	  return gen_int_mode (c, mode);
6097       }
6098 
6099       /* Similarly to what we do in simplify-rtx.c, a truncate of a register
6100 	 whose value is a comparison can be replaced with a subreg if
6101 	 STORE_FLAG_VALUE permits.  */
6102       if (HWI_COMPUTABLE_MODE_P (mode)
6103 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
6104 	  && (temp = get_last_value (XEXP (x, 0)))
6105 	  && COMPARISON_P (temp))
6106 	return gen_lowpart (mode, XEXP (x, 0));
6107       break;
6108 
6109     case CONST:
6110       /* (const (const X)) can become (const X).  Do it this way rather than
6111 	 returning the inner CONST since CONST can be shared with a
6112 	 REG_EQUAL note.  */
6113       if (GET_CODE (XEXP (x, 0)) == CONST)
6114 	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
6115       break;
6116 
6117     case LO_SUM:
6118       /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
6119 	 can add in an offset.  find_split_point will split this address up
6120 	 again if it doesn't match.  */
6121       if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
6122 	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6123 	return XEXP (x, 1);
6124       break;
6125 
6126     case PLUS:
6127       /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6128 	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6129 	 bit-field and can be replaced by either a sign_extend or a
6130 	 sign_extract.  The `and' may be a zero_extend and the two
6131 	 <c>, -<c> constants may be reversed.  */
6132       if (GET_CODE (XEXP (x, 0)) == XOR
6133 	  && is_a <scalar_int_mode> (mode, &int_mode)
6134 	  && CONST_INT_P (XEXP (x, 1))
6135 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6136 	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6137 	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6138 	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6139 	  && HWI_COMPUTABLE_MODE_P (int_mode)
6140 	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6141 	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6142 	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6143 		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6144 	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6145 		  && known_eq ((GET_MODE_PRECISION
6146 				(GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6147 			       (unsigned int) i + 1))))
6148 	return simplify_shift_const
6149 	  (NULL_RTX, ASHIFTRT, int_mode,
6150 	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6151 				 XEXP (XEXP (XEXP (x, 0), 0), 0),
6152 				 GET_MODE_PRECISION (int_mode) - (i + 1)),
6153 	   GET_MODE_PRECISION (int_mode) - (i + 1));
6154 
6155       /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6156 	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6157 	 the bitsize of the mode - 1.  This allows simplification of
6158 	 "a = (b & 8) == 0;"  */
6159       if (XEXP (x, 1) == constm1_rtx
6160 	  && !REG_P (XEXP (x, 0))
6161 	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6162 		&& REG_P (SUBREG_REG (XEXP (x, 0))))
6163 	  && is_a <scalar_int_mode> (mode, &int_mode)
6164 	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6165 	return simplify_shift_const
6166 	  (NULL_RTX, ASHIFTRT, int_mode,
6167 	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6168 				 gen_rtx_XOR (int_mode, XEXP (x, 0),
6169 					      const1_rtx),
6170 				 GET_MODE_PRECISION (int_mode) - 1),
6171 	   GET_MODE_PRECISION (int_mode) - 1);
6172 
6173       /* If we are adding two things that have no bits in common, convert
6174 	 the addition into an IOR.  This will often be further simplified,
6175 	 for example in cases like ((a & 1) + (a & 2)), which can
6176 	 become a & 3.  */
6177 
6178       if (HWI_COMPUTABLE_MODE_P (mode)
6179 	  && (nonzero_bits (XEXP (x, 0), mode)
6180 	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
6181 	{
6182 	  /* Try to simplify the expression further.  */
6183 	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6184 	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6185 
6186 	  /* If we could, great.  If not, do not go ahead with the IOR
6187 	     replacement, since PLUS appears in many special purpose
6188 	     address arithmetic instructions.  */
6189 	  if (GET_CODE (temp) != CLOBBER
6190 	      && (GET_CODE (temp) != IOR
6191 		  || ((XEXP (temp, 0) != XEXP (x, 0)
6192 		       || XEXP (temp, 1) != XEXP (x, 1))
6193 		      && (XEXP (temp, 0) != XEXP (x, 1)
6194 			  || XEXP (temp, 1) != XEXP (x, 0)))))
6195 	    return temp;
6196 	}
6197 
6198       /* Canonicalize x + x into x << 1.  */
6199       if (GET_MODE_CLASS (mode) == MODE_INT
6200 	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6201 	  && !side_effects_p (XEXP (x, 0)))
6202 	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6203 
6204       break;
6205 
6206     case MINUS:
6207       /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6208 	 (and <foo> (const_int pow2-1))  */
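      /* For example, (minus A (and A (const_int -8))) becomes
	 (and A (const_int 7)), since subtracting A with its low three
	 bits cleared leaves exactly those low bits.  */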
6209       if (is_a <scalar_int_mode> (mode, &int_mode)
6210 	  && GET_CODE (XEXP (x, 1)) == AND
6211 	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6212 	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6213 	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6214 	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6215 				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6216       break;
6217 
6218     case MULT:
6219       /* If we have (mult (plus A B) C), apply the distributive law and then
6220 	 the inverse distributive law to see if things simplify.  This
6221 	 occurs mostly in addresses, often when unrolling loops.  */
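      /* For example, (mult (plus A (const_int 4)) (const_int 8)) may become
	 (plus (mult A (const_int 8)) (const_int 32)) when the distributed
	 form turns out to be cheaper.  */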
6222 
6223       if (GET_CODE (XEXP (x, 0)) == PLUS)
6224 	{
6225 	  rtx result = distribute_and_simplify_rtx (x, 0);
6226 	  if (result)
6227 	    return result;
6228 	}
6229 
6230       /* Try to simplify a*(b/c) as (a*b)/c.  */
6231       if (FLOAT_MODE_P (mode) && flag_associative_math
6232 	  && GET_CODE (XEXP (x, 0)) == DIV)
6233 	{
6234 	  rtx tem = simplify_binary_operation (MULT, mode,
6235 					       XEXP (XEXP (x, 0), 0),
6236 					       XEXP (x, 1));
6237 	  if (tem)
6238 	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6239 	}
6240       break;
6241 
6242     case UDIV:
6243       /* If this is a divide by a power of two, treat it as a shift if
6244 	 its first operand is a shift.  */
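      /* For example, (udiv (lshiftrt A (const_int 2)) (const_int 4)) is
	 rewritten as a logical right shift by 2 of that inner shift, which
	 simplify_shift_const can merge into (lshiftrt A (const_int 4)).  */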
6245       if (is_a <scalar_int_mode> (mode, &int_mode)
6246 	  && CONST_INT_P (XEXP (x, 1))
6247 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6248 	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
6249 	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6250 	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6251 	      || GET_CODE (XEXP (x, 0)) == ROTATE
6252 	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
6253 	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6254 				     XEXP (x, 0), i);
6255       break;
6256 
6257     case EQ:  case NE:
6258     case GT:  case GTU:  case GE:  case GEU:
6259     case LT:  case LTU:  case LE:  case LEU:
6260     case UNEQ:  case LTGT:
6261     case UNGT:  case UNGE:
6262     case UNLT:  case UNLE:
6263     case UNORDERED: case ORDERED:
6264       /* If the first operand is a condition code, we can't do anything
6265 	 with it.  */
6266       if (GET_CODE (XEXP (x, 0)) == COMPARE
6267 	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6268 	      && ! CC0_P (XEXP (x, 0))))
6269 	{
6270 	  rtx op0 = XEXP (x, 0);
6271 	  rtx op1 = XEXP (x, 1);
6272 	  enum rtx_code new_code;
6273 
6274 	  if (GET_CODE (op0) == COMPARE)
6275 	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6276 
6277 	  /* Simplify our comparison, if possible.  */
6278 	  new_code = simplify_comparison (code, &op0, &op1);
6279 
6280 	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6281 	     if only the low-order bit is possibly nonzero in X (such as when
6282 	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
6283 	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
6284 	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
6285 	     (plus X 1).
6286 
6287 	     Remove any ZERO_EXTRACT we made when thinking this was a
6288 	     comparison.  It may now be simpler to use, e.g., an AND.  If a
6289 	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
6290 	     the call to make_compound_operation in the SET case.
6291 
6292 	     Don't apply these optimizations if the caller would
6293 	     prefer a comparison rather than a value.
6294 	     E.g., for the condition in an IF_THEN_ELSE most targets need
6295 	     an explicit comparison.  */
6296 
6297 	  if (in_cond)
6298 	    ;
6299 
6300 	  else if (STORE_FLAG_VALUE == 1
6301 		   && new_code == NE
6302 		   && is_int_mode (mode, &int_mode)
6303 		   && op1 == const0_rtx
6304 		   && int_mode == GET_MODE (op0)
6305 		   && nonzero_bits (op0, int_mode) == 1)
6306 	    return gen_lowpart (int_mode,
6307 				expand_compound_operation (op0));
6308 
6309 	  else if (STORE_FLAG_VALUE == 1
6310 		   && new_code == NE
6311 		   && is_int_mode (mode, &int_mode)
6312 		   && op1 == const0_rtx
6313 		   && int_mode == GET_MODE (op0)
6314 		   && (num_sign_bit_copies (op0, int_mode)
6315 		       == GET_MODE_PRECISION (int_mode)))
6316 	    {
6317 	      op0 = expand_compound_operation (op0);
6318 	      return simplify_gen_unary (NEG, int_mode,
6319 					 gen_lowpart (int_mode, op0),
6320 					 int_mode);
6321 	    }
6322 
6323 	  else if (STORE_FLAG_VALUE == 1
6324 		   && new_code == EQ
6325 		   && is_int_mode (mode, &int_mode)
6326 		   && op1 == const0_rtx
6327 		   && int_mode == GET_MODE (op0)
6328 		   && nonzero_bits (op0, int_mode) == 1)
6329 	    {
6330 	      op0 = expand_compound_operation (op0);
6331 	      return simplify_gen_binary (XOR, int_mode,
6332 					  gen_lowpart (int_mode, op0),
6333 					  const1_rtx);
6334 	    }
6335 
6336 	  else if (STORE_FLAG_VALUE == 1
6337 		   && new_code == EQ
6338 		   && is_int_mode (mode, &int_mode)
6339 		   && op1 == const0_rtx
6340 		   && int_mode == GET_MODE (op0)
6341 		   && (num_sign_bit_copies (op0, int_mode)
6342 		       == GET_MODE_PRECISION (int_mode)))
6343 	    {
6344 	      op0 = expand_compound_operation (op0);
6345 	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6346 	    }
6347 
6348 	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
6349 	     those above.  */
6350 	  if (in_cond)
6351 	    ;
6352 
6353 	  else if (STORE_FLAG_VALUE == -1
6354 		   && new_code == NE
6355 		   && is_int_mode (mode, &int_mode)
6356 		   && op1 == const0_rtx
6357 		   && int_mode == GET_MODE (op0)
6358 		   && (num_sign_bit_copies (op0, int_mode)
6359 		       == GET_MODE_PRECISION (int_mode)))
6360 	    return gen_lowpart (int_mode, expand_compound_operation (op0));
6361 
6362 	  else if (STORE_FLAG_VALUE == -1
6363 		   && new_code == NE
6364 		   && is_int_mode (mode, &int_mode)
6365 		   && op1 == const0_rtx
6366 		   && int_mode == GET_MODE (op0)
6367 		   && nonzero_bits (op0, int_mode) == 1)
6368 	    {
6369 	      op0 = expand_compound_operation (op0);
6370 	      return simplify_gen_unary (NEG, int_mode,
6371 					 gen_lowpart (int_mode, op0),
6372 					 int_mode);
6373 	    }
6374 
6375 	  else if (STORE_FLAG_VALUE == -1
6376 		   && new_code == EQ
6377 		   && is_int_mode (mode, &int_mode)
6378 		   && op1 == const0_rtx
6379 		   && int_mode == GET_MODE (op0)
6380 		   && (num_sign_bit_copies (op0, int_mode)
6381 		       == GET_MODE_PRECISION (int_mode)))
6382 	    {
6383 	      op0 = expand_compound_operation (op0);
6384 	      return simplify_gen_unary (NOT, int_mode,
6385 					 gen_lowpart (int_mode, op0),
6386 					 int_mode);
6387 	    }
6388 
6389 	  /* If X is 0/1, (eq X 0) is X-1.  */
6390 	  else if (STORE_FLAG_VALUE == -1
6391 		   && new_code == EQ
6392 		   && is_int_mode (mode, &int_mode)
6393 		   && op1 == const0_rtx
6394 		   && int_mode == GET_MODE (op0)
6395 		   && nonzero_bits (op0, int_mode) == 1)
6396 	    {
6397 	      op0 = expand_compound_operation (op0);
6398 	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6399 	    }
6400 
6401 	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6402 	     one bit that might be nonzero, we can convert (ne x 0) to
6403 	     (ashift x c) where C puts the bit in the sign bit.  Remove any
6404 	     AND with STORE_FLAG_VALUE when we are done, since we are only
6405 	     going to test the sign bit.  */
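	  /* For example, in SImode, if only bit 3 of X can be nonzero,
	     (ne X 0) becomes (ashift X 28), which moves that bit into the
	     sign bit position.  */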
6406 	  if (new_code == NE
6407 	      && is_int_mode (mode, &int_mode)
6408 	      && HWI_COMPUTABLE_MODE_P (int_mode)
6409 	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6410 	      && op1 == const0_rtx
6411 	      && int_mode == GET_MODE (op0)
6412 	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6413 	    {
6414 	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6415 					expand_compound_operation (op0),
6416 					GET_MODE_PRECISION (int_mode) - 1 - i);
6417 	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6418 		return XEXP (x, 0);
6419 	      else
6420 		return x;
6421 	    }
6422 
6423 	  /* If the code changed, return a whole new comparison.
6424 	     We also need to avoid using SUBST in cases where
6425 	     simplify_comparison has widened a comparison with a CONST_INT,
6426 	     since in that case the wider CONST_INT may fail the sanity
6427 	     checks in do_SUBST.  */
6428 	  if (new_code != code
6429 	      || (CONST_INT_P (op1)
6430 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6431 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6432 	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6433 
6434 	  /* Otherwise, keep this operation, but maybe change its operands.
6435 	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
6436 	  SUBST (XEXP (x, 0), op0);
6437 	  SUBST (XEXP (x, 1), op1);
6438 	}
6439       break;
6440 
6441     case IF_THEN_ELSE:
6442       return simplify_if_then_else (x);
6443 
6444     case ZERO_EXTRACT:
6445     case SIGN_EXTRACT:
6446     case ZERO_EXTEND:
6447     case SIGN_EXTEND:
6448       /* If we are processing SET_DEST, we are done.  */
6449       if (in_dest)
6450 	return x;
6451 
6452       return expand_compound_operation (x);
6453 
6454     case SET:
6455       return simplify_set (x);
6456 
6457     case AND:
6458     case IOR:
6459       return simplify_logical (x);
6460 
6461     case ASHIFT:
6462     case LSHIFTRT:
6463     case ASHIFTRT:
6464     case ROTATE:
6465     case ROTATERT:
6466       /* If this is a shift by a constant amount, simplify it.  */
6467       if (CONST_INT_P (XEXP (x, 1)))
6468 	return simplify_shift_const (x, code, mode, XEXP (x, 0),
6469 				     INTVAL (XEXP (x, 1)));
6470 
6471       else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6472 	SUBST (XEXP (x, 1),
6473 	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6474 			      (HOST_WIDE_INT_1U
6475 			       << exact_log2 (GET_MODE_UNIT_BITSIZE
6476 					      (GET_MODE (x))))
6477 			      - 1,
6478 			      0));
6479       break;
6480 
6481     default:
6482       break;
6483     }
6484 
6485   return x;
6486 }
6487 
6488 /* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */
6489 
6490 static rtx
6491 simplify_if_then_else (rtx x)
6492 {
6493   machine_mode mode = GET_MODE (x);
6494   rtx cond = XEXP (x, 0);
6495   rtx true_rtx = XEXP (x, 1);
6496   rtx false_rtx = XEXP (x, 2);
6497   enum rtx_code true_code = GET_CODE (cond);
6498   int comparison_p = COMPARISON_P (cond);
6499   rtx temp;
6500   int i;
6501   enum rtx_code false_code;
6502   rtx reversed;
6503   scalar_int_mode int_mode, inner_mode;
6504 
6505   /* Simplify storing of the truth value.  */
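  /* For example, with STORE_FLAG_VALUE == 1,
     (if_then_else (lt A B) (const_int 1) (const_int 0)) collapses
     to (lt A B).  */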
6506   if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6507     return simplify_gen_relational (true_code, mode, VOIDmode,
6508 				    XEXP (cond, 0), XEXP (cond, 1));
6509 
6510   /* Also when the truth value has to be reversed.  */
6511   if (comparison_p
6512       && true_rtx == const0_rtx && false_rtx == const_true_rtx
6513       && (reversed = reversed_comparison (cond, mode)))
6514     return reversed;
6515 
6516   /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6517      in it is being compared against certain values.  Get the true and false
6518      comparisons and see if that says anything about the value of each arm.  */
6519 
6520   if (comparison_p
6521       && ((false_code = reversed_comparison_code (cond, NULL))
6522 	  != UNKNOWN)
6523       && REG_P (XEXP (cond, 0)))
6524     {
6525       HOST_WIDE_INT nzb;
6526       rtx from = XEXP (cond, 0);
6527       rtx true_val = XEXP (cond, 1);
6528       rtx false_val = true_val;
6529       int swapped = 0;
6530 
6531       /* If FALSE_CODE is EQ, swap the codes and arms.  */
6532 
6533       if (false_code == EQ)
6534 	{
6535 	  swapped = 1, true_code = EQ, false_code = NE;
6536 	  std::swap (true_rtx, false_rtx);
6537 	}
6538 
6539       scalar_int_mode from_mode;
6540       if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6541 	{
6542 	  /* If we are comparing against zero and the expression being
6543 	     tested has only a single bit that might be nonzero, that is
6544 	     its value when it is not equal to zero.  Similarly if it is
6545 	     known to be -1 or 0.  */
6546 	  if (true_code == EQ
6547 	      && true_val == const0_rtx
6548 	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6549 	    {
6550 	      false_code = EQ;
6551 	      false_val = gen_int_mode (nzb, from_mode);
6552 	    }
6553 	  else if (true_code == EQ
6554 		   && true_val == const0_rtx
6555 		   && (num_sign_bit_copies (from, from_mode)
6556 		       == GET_MODE_PRECISION (from_mode)))
6557 	    {
6558 	      false_code = EQ;
6559 	      false_val = constm1_rtx;
6560 	    }
6561 	}
6562 
6563       /* Now simplify an arm if we know the value of the register in the
6564 	 branch and it is used in the arm.  Be careful due to the potential
6565 	 of locally-shared RTL.  */
6566 
6567       if (reg_mentioned_p (from, true_rtx))
6568 	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6569 				      from, true_val),
6570 			  pc_rtx, pc_rtx, 0, 0, 0);
6571       if (reg_mentioned_p (from, false_rtx))
6572 	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6573 				       from, false_val),
6574 			   pc_rtx, pc_rtx, 0, 0, 0);
6575 
6576       SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6577       SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6578 
6579       true_rtx = XEXP (x, 1);
6580       false_rtx = XEXP (x, 2);
6581       true_code = GET_CODE (cond);
6582     }
6583 
6584   /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6585      reversed, do so to avoid needing two sets of patterns for
6586      subtract-and-branch insns.  Similarly if we have a constant in the true
6587      arm, the false arm is the same as the first operand of the comparison, or
6588      the false arm is more complicated than the true arm.  */
6589 
6590   if (comparison_p
6591       && reversed_comparison_code (cond, NULL) != UNKNOWN
6592       && (true_rtx == pc_rtx
6593 	  || (CONSTANT_P (true_rtx)
6594 	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6595 	  || true_rtx == const0_rtx
6596 	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6597 	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6598 	      && !OBJECT_P (false_rtx))
6599 	  || reg_mentioned_p (true_rtx, false_rtx)
6600 	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6601     {
6602       true_code = reversed_comparison_code (cond, NULL);
6603       SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6604       SUBST (XEXP (x, 1), false_rtx);
6605       SUBST (XEXP (x, 2), true_rtx);
6606 
6607       std::swap (true_rtx, false_rtx);
6608       cond = XEXP (x, 0);
6609 
6610       /* It is possible that the conditional has been simplified out.  */
6611       true_code = GET_CODE (cond);
6612       comparison_p = COMPARISON_P (cond);
6613     }
6614 
6615   /* If the two arms are identical, we don't need the comparison.  */
6616 
6617   if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6618     return true_rtx;
6619 
6620   /* Convert a == b ? b : a to "a".  */
6621   if (true_code == EQ && ! side_effects_p (cond)
6622       && !HONOR_NANS (mode)
6623       && rtx_equal_p (XEXP (cond, 0), false_rtx)
6624       && rtx_equal_p (XEXP (cond, 1), true_rtx))
6625     return false_rtx;
6626   else if (true_code == NE && ! side_effects_p (cond)
6627 	   && !HONOR_NANS (mode)
6628 	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
6629 	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
6630     return true_rtx;
6631 
6632   /* Look for cases where we have (abs x) or (neg (abs X)).  */
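  /* For example, (if_then_else (ge A (const_int 0)) A (neg A)) becomes
     (abs A); with LT or LE it becomes (neg (abs A)).  */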
6633 
6634   if (GET_MODE_CLASS (mode) == MODE_INT
6635       && comparison_p
6636       && XEXP (cond, 1) == const0_rtx
6637       && GET_CODE (false_rtx) == NEG
6638       && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6639       && rtx_equal_p (true_rtx, XEXP (cond, 0))
6640       && ! side_effects_p (true_rtx))
6641     switch (true_code)
6642       {
6643       case GT:
6644       case GE:
6645 	return simplify_gen_unary (ABS, mode, true_rtx, mode);
6646       case LT:
6647       case LE:
6648 	return
6649 	  simplify_gen_unary (NEG, mode,
6650 			      simplify_gen_unary (ABS, mode, true_rtx, mode),
6651 			      mode);
6652       default:
6653 	break;
6654       }
6655 
6656   /* Look for MIN or MAX.  */
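  /* For example, (if_then_else (gt A B) A B) becomes (smax A B) and
     (if_then_else (ltu A B) A B) becomes (umin A B).  */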
6657 
6658   if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6659       && comparison_p
6660       && rtx_equal_p (XEXP (cond, 0), true_rtx)
6661       && rtx_equal_p (XEXP (cond, 1), false_rtx)
6662       && ! side_effects_p (cond))
6663     switch (true_code)
6664       {
6665       case GE:
6666       case GT:
6667 	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6668       case LE:
6669       case LT:
6670 	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6671       case GEU:
6672       case GTU:
6673 	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6674       case LEU:
6675       case LTU:
6676 	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6677       default:
6678 	break;
6679       }
6680 
6681   /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6682      second operand is zero, this can be done as (OP Z (mult COND C2)) where
6683      C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6684      SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6685      We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6686 	     neither 1 nor -1, but it isn't worth checking for.  */
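  /* For example, with STORE_FLAG_VALUE == 1,
     (if_then_else (eq A B) (plus Z (const_int 4)) Z) becomes
     (plus Z (mult (eq A B) (const_int 4))).  */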
6687 
6688   if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6689       && comparison_p
6690       && is_int_mode (mode, &int_mode)
6691       && ! side_effects_p (x))
6692     {
6693       rtx t = make_compound_operation (true_rtx, SET);
6694       rtx f = make_compound_operation (false_rtx, SET);
6695       rtx cond_op0 = XEXP (cond, 0);
6696       rtx cond_op1 = XEXP (cond, 1);
6697       enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6698       scalar_int_mode m = int_mode;
6699       rtx z = 0, c1 = NULL_RTX;
6700 
6701       if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6702 	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6703 	   || GET_CODE (t) == ASHIFT
6704 	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6705 	  && rtx_equal_p (XEXP (t, 0), f))
6706 	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6707 
6708       /* If an identity-zero op is commutative, check whether there
6709 	 would be a match if we swapped the operands.  */
6710       else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6711 		|| GET_CODE (t) == XOR)
6712 	       && rtx_equal_p (XEXP (t, 1), f))
6713 	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6714       else if (GET_CODE (t) == SIGN_EXTEND
6715 	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6716 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6717 		   || GET_CODE (XEXP (t, 0)) == MINUS
6718 		   || GET_CODE (XEXP (t, 0)) == IOR
6719 		   || GET_CODE (XEXP (t, 0)) == XOR
6720 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6721 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6722 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6723 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6724 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6725 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6726 	       && (num_sign_bit_copies (f, GET_MODE (f))
6727 		   > (unsigned int)
6728 		     (GET_MODE_PRECISION (int_mode)
6729 		      - GET_MODE_PRECISION (inner_mode))))
6730 	{
6731 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6732 	  extend_op = SIGN_EXTEND;
6733 	  m = inner_mode;
6734 	}
6735       else if (GET_CODE (t) == SIGN_EXTEND
6736 	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6737 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6738 		   || GET_CODE (XEXP (t, 0)) == IOR
6739 		   || GET_CODE (XEXP (t, 0)) == XOR)
6740 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6741 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6742 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6743 	       && (num_sign_bit_copies (f, GET_MODE (f))
6744 		   > (unsigned int)
6745 		     (GET_MODE_PRECISION (int_mode)
6746 		      - GET_MODE_PRECISION (inner_mode))))
6747 	{
6748 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6749 	  extend_op = SIGN_EXTEND;
6750 	  m = inner_mode;
6751 	}
6752       else if (GET_CODE (t) == ZERO_EXTEND
6753 	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6754 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6755 		   || GET_CODE (XEXP (t, 0)) == MINUS
6756 		   || GET_CODE (XEXP (t, 0)) == IOR
6757 		   || GET_CODE (XEXP (t, 0)) == XOR
6758 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6759 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6760 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6761 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6762 	       && HWI_COMPUTABLE_MODE_P (int_mode)
6763 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6764 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6765 	       && ((nonzero_bits (f, GET_MODE (f))
6766 		    & ~GET_MODE_MASK (inner_mode))
6767 		   == 0))
6768 	{
6769 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6770 	  extend_op = ZERO_EXTEND;
6771 	  m = inner_mode;
6772 	}
6773       else if (GET_CODE (t) == ZERO_EXTEND
6774 	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6775 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6776 		   || GET_CODE (XEXP (t, 0)) == IOR
6777 		   || GET_CODE (XEXP (t, 0)) == XOR)
6778 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6779 	       && HWI_COMPUTABLE_MODE_P (int_mode)
6780 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6781 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6782 	       && ((nonzero_bits (f, GET_MODE (f))
6783 		    & ~GET_MODE_MASK (inner_mode))
6784 		   == 0))
6785 	{
6786 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6787 	  extend_op = ZERO_EXTEND;
6788 	  m = inner_mode;
6789 	}
6790 
6791       if (z)
6792 	{
6793 	  machine_mode cm = m;
6794 	  if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6795 	      && GET_MODE (c1) != VOIDmode)
6796 	    cm = GET_MODE (c1);
6797 	  temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6798 						 cond_op0, cond_op1),
6799 			pc_rtx, pc_rtx, 0, 0, 0);
6800 	  temp = simplify_gen_binary (MULT, cm, temp,
6801 				      simplify_gen_binary (MULT, cm, c1,
6802 							   const_true_rtx));
6803 	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6804 	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6805 
6806 	  if (extend_op != UNKNOWN)
6807 	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6808 
6809 	  return temp;
6810 	}
6811     }
6812 
6813   /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6814      1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6815      negation of a single bit, we can convert this operation to a shift.  We
6816      can actually do this more generally, but it doesn't seem worth it.  */
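  /* For example, (if_then_else (ne A (const_int 0)) (const_int 8)
     (const_int 0)) becomes (ashift A (const_int 3)) when A is known
     to be 0 or 1.  */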
6817 
6818   if (true_code == NE
6819       && is_a <scalar_int_mode> (mode, &int_mode)
6820       && XEXP (cond, 1) == const0_rtx
6821       && false_rtx == const0_rtx
6822       && CONST_INT_P (true_rtx)
6823       && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6824 	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6825 	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6826 	       == GET_MODE_PRECISION (int_mode))
6827 	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6828     return
6829       simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6830 			    gen_lowpart (int_mode, XEXP (cond, 0)), i);
6831 
6832   /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6833      non-zero bit in A is C1.  */
6834   if (true_code == NE && XEXP (cond, 1) == const0_rtx
6835       && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6836       && is_a <scalar_int_mode> (mode, &int_mode)
6837       && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6838       && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6839 	  == nonzero_bits (XEXP (cond, 0), inner_mode)
6840       && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6841     {
6842       rtx val = XEXP (cond, 0);
6843       if (inner_mode == int_mode)
6844         return val;
6845       else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6846         return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6847     }
6848 
6849   return x;
6850 }
6851 
6852 /* Simplify X, a SET expression.  Return the new expression.  */
6853 
6854 static rtx
6855 simplify_set (rtx x)
6856 {
6857   rtx src = SET_SRC (x);
6858   rtx dest = SET_DEST (x);
6859   machine_mode mode
6860     = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6861   rtx_insn *other_insn;
6862   rtx *cc_use;
6863   scalar_int_mode int_mode;
6864 
6865   /* (set (pc) (return)) gets written as (return).  */
6866   if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6867     return src;
6868 
6869   /* Now that we know for sure which bits of SRC we are using, see if we can
6870      simplify the expression for the object knowing that we only need the
6871      low-order bits.  */
6872 
6873   if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6874     {
6875       src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6876       SUBST (SET_SRC (x), src);
6877     }
6878 
6879   /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6880      the comparison result and try to simplify it unless we already have used
6881      undobuf.other_insn.  */
6882   if ((GET_MODE_CLASS (mode) == MODE_CC
6883        || GET_CODE (src) == COMPARE
6884        || CC0_P (dest))
6885       && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6886       && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6887       && COMPARISON_P (*cc_use)
6888       && rtx_equal_p (XEXP (*cc_use, 0), dest))
6889     {
6890       enum rtx_code old_code = GET_CODE (*cc_use);
6891       enum rtx_code new_code;
6892       rtx op0, op1, tmp;
6893       int other_changed = 0;
6894       rtx inner_compare = NULL_RTX;
6895       machine_mode compare_mode = GET_MODE (dest);
6896 
6897       if (GET_CODE (src) == COMPARE)
6898 	{
6899 	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6900 	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6901 	    {
6902 	      inner_compare = op0;
6903 	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6904 	    }
6905 	}
6906       else
6907 	op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6908 
6909       tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6910 					   op0, op1);
6911       if (!tmp)
6912 	new_code = old_code;
6913       else if (!CONSTANT_P (tmp))
6914 	{
6915 	  new_code = GET_CODE (tmp);
6916 	  op0 = XEXP (tmp, 0);
6917 	  op1 = XEXP (tmp, 1);
6918 	}
6919       else
6920 	{
6921 	  rtx pat = PATTERN (other_insn);
6922 	  undobuf.other_insn = other_insn;
6923 	  SUBST (*cc_use, tmp);
6924 
6925 	  /* Attempt to simplify CC user.  */
6926 	  if (GET_CODE (pat) == SET)
6927 	    {
6928 	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
6929 	      if (new_rtx != NULL_RTX)
6930 		SUBST (SET_SRC (pat), new_rtx);
6931 	    }
6932 
6933 	  /* Convert X into a no-op move.  */
6934 	  SUBST (SET_DEST (x), pc_rtx);
6935 	  SUBST (SET_SRC (x), pc_rtx);
6936 	  return x;
6937 	}
6938 
6939       /* Simplify our comparison, if possible.  */
6940       new_code = simplify_comparison (new_code, &op0, &op1);
6941 
6942 #ifdef SELECT_CC_MODE
6943       /* If this machine has CC modes other than CCmode, check to see if we
6944 	 need to use a different CC mode here.  */
6945       if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6946 	compare_mode = GET_MODE (op0);
6947       else if (inner_compare
6948 	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6949 	       && new_code == old_code
6950 	       && op0 == XEXP (inner_compare, 0)
6951 	       && op1 == XEXP (inner_compare, 1))
6952 	compare_mode = GET_MODE (inner_compare);
6953       else
6954 	compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6955 
6956       /* If the mode changed, we have to change SET_DEST, the mode in the
6957 	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
6958 	 a hard register, just build new versions with the proper mode.  If it
6959 	 is a pseudo, we lose unless it is the only time we set the pseudo, in
6960 	 which case we can safely change its mode.  */
6961       if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6962 	{
6963 	  if (can_change_dest_mode (dest, 0, compare_mode))
6964 	    {
6965 	      unsigned int regno = REGNO (dest);
6966 	      rtx new_dest;
6967 
6968 	      if (regno < FIRST_PSEUDO_REGISTER)
6969 		new_dest = gen_rtx_REG (compare_mode, regno);
6970 	      else
6971 		{
6972 		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6973 		  new_dest = regno_reg_rtx[regno];
6974 		}
6975 
6976 	      SUBST (SET_DEST (x), new_dest);
6977 	      SUBST (XEXP (*cc_use, 0), new_dest);
6978 	      other_changed = 1;
6979 
6980 	      dest = new_dest;
6981 	    }
6982 	}
6983 #endif  /* SELECT_CC_MODE */
6984 
6985       /* If the code changed, we have to build a new comparison in
6986 	 undobuf.other_insn.  */
6987       if (new_code != old_code)
6988 	{
6989 	  int other_changed_previously = other_changed;
6990 	  unsigned HOST_WIDE_INT mask;
6991 	  rtx old_cc_use = *cc_use;
6992 
6993 	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6994 					  dest, const0_rtx));
6995 	  other_changed = 1;
6996 
6997 	  /* If the only change we made was to change an EQ into an NE or
6998 	     vice versa, OP0 has only one bit that might be nonzero, and OP1
6999 	     is zero, check if changing the user of the condition code will
7000 	     produce a valid insn.  If it won't, we can keep the original code
7001 	     in that insn by surrounding our operation with an XOR.  */
7002 
7003 	  if (((old_code == NE && new_code == EQ)
7004 	       || (old_code == EQ && new_code == NE))
7005 	      && ! other_changed_previously && op1 == const0_rtx
7006 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
7007 	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
7008 	    {
7009 	      rtx pat = PATTERN (other_insn), note = 0;
7010 
7011 	      if ((recog_for_combine (&pat, other_insn, &note) < 0
7012 		   && ! check_asm_operands (pat)))
7013 		{
7014 		  *cc_use = old_cc_use;
7015 		  other_changed = 0;
7016 
7017 		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
7018 					     gen_int_mode (mask,
7019 							   GET_MODE (op0)));
7020 		}
7021 	    }
7022 	}
7023 
7024       if (other_changed)
7025 	undobuf.other_insn = other_insn;
7026 
7027       /* Don't generate a compare of a CC with 0, just use that CC.  */
7028       if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
7029 	{
7030 	  SUBST (SET_SRC (x), op0);
7031 	  src = SET_SRC (x);
7032 	}
7033       /* Otherwise, if we didn't previously have the same COMPARE we
7034 	 want, create it from scratch.  */
7035       else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
7036 	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
7037 	{
7038 	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
7039 	  src = SET_SRC (x);
7040 	}
7041     }
7042   else
7043     {
7044       /* Get SET_SRC in a form where we have placed back any
7045 	 compound expressions.  Then do the checks below.  */
7046       src = make_compound_operation (src, SET);
7047       SUBST (SET_SRC (x), src);
7048     }
7049 
7050   /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
7051      and X being a REG or (subreg (reg)), we may be able to convert this to
7052      (set (subreg:m2 x) (op)).
7053 
7054      We can always do this if M1 is narrower than M2 because that means that
7055      we only care about the low bits of the result.
7056 
7057      However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
7058      perform a narrower operation than requested since the high-order bits will
7059 	 be undefined.  On machines where it is defined, this transformation is safe
7060      as long as M1 and M2 have the same number of words.  */
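  /* For example, on a 32-bit target,
     (set X:QI (subreg:QI (plus:SI A B) 0)) can become
     (set (subreg:SI X 0) (plus:SI A B)), since only the low byte of
     the result is needed.  */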
7061 
7062   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
7063       && !OBJECT_P (SUBREG_REG (src))
7064       && (known_equal_after_align_up
7065 	  (GET_MODE_SIZE (GET_MODE (src)),
7066 	   GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
7067 	   UNITS_PER_WORD))
7068       && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
7069       && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
7070 	    && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
7071 				       GET_MODE (SUBREG_REG (src)),
7072 				       GET_MODE (src)))
7073       && (REG_P (dest)
7074 	  || (GET_CODE (dest) == SUBREG
7075 	      && REG_P (SUBREG_REG (dest)))))
7076     {
7077       SUBST (SET_DEST (x),
7078 	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
7079 				      dest));
7080       SUBST (SET_SRC (x), SUBREG_REG (src));
7081 
7082       src = SET_SRC (x), dest = SET_DEST (x);
7083     }
7084 
7085   /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
7086      in SRC.  */
7087   if (dest == cc0_rtx
7088       && partial_subreg_p (src)
7089       && subreg_lowpart_p (src))
7090     {
7091       rtx inner = SUBREG_REG (src);
7092       machine_mode inner_mode = GET_MODE (inner);
7093 
7094       /* Here we make sure that we don't have a sign bit on.  */
7095       if (val_signbit_known_clear_p (GET_MODE (src),
7096 				     nonzero_bits (inner, inner_mode)))
7097 	{
7098 	  SUBST (SET_SRC (x), inner);
7099 	  src = SET_SRC (x);
7100 	}
7101     }
7102 
7103   /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
7104      would require a paradoxical subreg.  Replace the subreg with a
7105      zero_extend to avoid the reload that would otherwise be required.
7106      Don't do this unless we have a scalar integer mode, otherwise the
7107      transformation is incorrect.  */
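  /* For example, on a target where QImode loads zero-extend
     (load_extend_op (QImode) == ZERO_EXTEND),
     (subreg:SI (mem:QI ADDR) 0) becomes (zero_extend:SI (mem:QI ADDR)).  */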
7108 
7109   enum rtx_code extend_op;
7110   if (paradoxical_subreg_p (src)
7111       && MEM_P (SUBREG_REG (src))
7112       && SCALAR_INT_MODE_P (GET_MODE (src))
7113       && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
7114     {
7115       SUBST (SET_SRC (x),
7116 	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
7117 
7118       src = SET_SRC (x);
7119     }
7120 
7121   /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
7122      are comparing an item known to be 0 or -1 against 0, use a logical
7123      operation instead. Check for one of the arms being an IOR of the other
7124      arm with some value.  We compute three terms to be IOR'ed together.  In
7125      practice, at most two will be nonzero.  Then we do the IOR's.  */
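  /* For example, if A is known to be 0 or -1,
     (if_then_else (ne A 0) B C) becomes
     (ior (and A B) (and (not A) C)).  */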
7126 
7127   if (GET_CODE (dest) != PC
7128       && GET_CODE (src) == IF_THEN_ELSE
7129       && is_int_mode (GET_MODE (src), &int_mode)
7130       && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
7131       && XEXP (XEXP (src, 0), 1) == const0_rtx
7132       && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
7133       && (!HAVE_conditional_move
7134 	  || ! can_conditionally_move_p (int_mode))
7135       && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7136 	  == GET_MODE_PRECISION (int_mode))
7137       && ! side_effects_p (src))
7138     {
7139       rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7140 		      ? XEXP (src, 1) : XEXP (src, 2));
7141       rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7142 		   ? XEXP (src, 2) : XEXP (src, 1));
7143       rtx term1 = const0_rtx, term2, term3;
7144 
7145       if (GET_CODE (true_rtx) == IOR
7146 	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7147 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7148       else if (GET_CODE (true_rtx) == IOR
7149 	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7150 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7151       else if (GET_CODE (false_rtx) == IOR
7152 	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7153 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7154       else if (GET_CODE (false_rtx) == IOR
7155 	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7156 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7157 
7158       term2 = simplify_gen_binary (AND, int_mode,
7159 				   XEXP (XEXP (src, 0), 0), true_rtx);
7160       term3 = simplify_gen_binary (AND, int_mode,
7161 				   simplify_gen_unary (NOT, int_mode,
7162 						       XEXP (XEXP (src, 0), 0),
7163 						       int_mode),
7164 				   false_rtx);
7165 
7166       SUBST (SET_SRC (x),
7167 	     simplify_gen_binary (IOR, int_mode,
7168 				  simplify_gen_binary (IOR, int_mode,
7169 						       term1, term2),
7170 				  term3));
7171 
7172       src = SET_SRC (x);
7173     }
7174 
7175   /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7176      whole thing fail.  */
7177   if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7178     return src;
7179   else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7180     return dest;
7181   else
7182     /* Convert this into a field assignment operation, if possible.  */
7183     return make_field_assignment (x);
7184 }
7185 
7186 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7187    result.  */
7188 
7189 static rtx
7190 simplify_logical (rtx x)
7191 {
7192   rtx op0 = XEXP (x, 0);
7193   rtx op1 = XEXP (x, 1);
7194   scalar_int_mode mode;
7195 
7196   switch (GET_CODE (x))
7197     {
7198     case AND:
7199       /* We can call simplify_and_const_int only if we don't lose
7200 	 any (sign) bits when converting INTVAL (op1) to
7201 	 "unsigned HOST_WIDE_INT".  */
7202       if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7203 	  && CONST_INT_P (op1)
7204 	  && (HWI_COMPUTABLE_MODE_P (mode)
7205 	      || INTVAL (op1) > 0))
7206 	{
7207 	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7208 	  if (GET_CODE (x) != AND)
7209 	    return x;
7210 
7211 	  op0 = XEXP (x, 0);
7212 	  op1 = XEXP (x, 1);
7213 	}
7214 
7215       /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7216 	 apply the distributive law and then the inverse distributive
7217 	 law to see if things simplify.  */
7218       if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7219 	{
7220 	  rtx result = distribute_and_simplify_rtx (x, 0);
7221 	  if (result)
7222 	    return result;
7223 	}
7224       if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7225 	{
7226 	  rtx result = distribute_and_simplify_rtx (x, 1);
7227 	  if (result)
7228 	    return result;
7229 	}
7230       break;
7231 
7232     case IOR:
7233       /* If we have (ior (and A B) C), apply the distributive law and then
7234 	 the inverse distributive law to see if things simplify.  */
7235 
7236       if (GET_CODE (op0) == AND)
7237 	{
7238 	  rtx result = distribute_and_simplify_rtx (x, 0);
7239 	  if (result)
7240 	    return result;
7241 	}
7242 
7243       if (GET_CODE (op1) == AND)
7244 	{
7245 	  rtx result = distribute_and_simplify_rtx (x, 1);
7246 	  if (result)
7247 	    return result;
7248 	}
7249       break;
7250 
7251     default:
7252       gcc_unreachable ();
7253     }
7254 
7255   return x;
7256 }
7257 
7258 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7259    operations" because they can be replaced with two more basic operations.
7260    ZERO_EXTEND is also considered "compound" because it can be replaced with
7261    an AND operation, which is simpler, though only one operation.
7262 
7263    The function expand_compound_operation is called with an rtx expression
7264    and will convert it to the appropriate shifts and AND operations,
7265    simplifying at each stage.
7266 
7267    The function make_compound_operation is called to convert an expression
7268    consisting of shifts and ANDs into the equivalent compound expression.
7269    It is the inverse of this function, loosely speaking.  */
7270 
7271 static rtx
7272 expand_compound_operation (rtx x)
7273 {
7274   unsigned HOST_WIDE_INT pos = 0, len;
7275   int unsignedp = 0;
7276   unsigned int modewidth;
7277   rtx tem;
7278   scalar_int_mode inner_mode;
7279 
7280   switch (GET_CODE (x))
7281     {
7282     case ZERO_EXTEND:
7283       unsignedp = 1;
7284       /* FALLTHRU */
7285     case SIGN_EXTEND:
7286       /* We can't necessarily use a const_int for a multiword mode;
7287 	 it depends on implicitly extending the value.
7288 	 Since we don't know the right way to extend it,
7289 	 we can't tell whether the implicit way is right.
7290 
7291 	 Even for a mode that is no wider than a const_int,
7292 	 we can't win, because we need to sign extend one of its bits through
7293 	 the rest of it, and we don't know which bit.  */
7294       if (CONST_INT_P (XEXP (x, 0)))
7295 	return x;
7296 
7297       /* Reject modes that aren't scalar integers because turning vector
7298 	 or complex modes into shifts causes problems.  */
7299       if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7300 	return x;
7301 
7302       /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7303 	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
7304 	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7305 	 reloaded. If not for that, MEM's would very rarely be safe.
7306 
7307 	 Reject modes bigger than a word, because we might not be able
7308 	 to reference a two-register group starting with an arbitrary register
7309 	 (and currently gen_lowpart might crash for a SUBREG).  */
7310 
7311       if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7312 	return x;
7313 
7314       len = GET_MODE_PRECISION (inner_mode);
7315       /* If the inner object has VOIDmode (the only way this can happen
7316 	 is if it is an ASM_OPERANDS), we can't do anything since we don't
7317 	 know how much masking to do.  */
7318       if (len == 0)
7319 	return x;
7320 
7321       break;
7322 
7323     case ZERO_EXTRACT:
7324       unsignedp = 1;
7325 
7326       /* fall through */
7327 
7328     case SIGN_EXTRACT:
7329       /* If the operand is a CLOBBER, just return it.  */
7330       if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7331 	return XEXP (x, 0);
7332 
7333       if (!CONST_INT_P (XEXP (x, 1))
7334 	  || !CONST_INT_P (XEXP (x, 2)))
7335 	return x;
7336 
7337       /* Reject modes that aren't scalar integers because turning vector
7338 	 or complex modes into shifts causes problems.  */
7339       if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7340 	return x;
7341 
7342       len = INTVAL (XEXP (x, 1));
7343       pos = INTVAL (XEXP (x, 2));
7344 
7345       /* This should stay within the object being extracted, fail otherwise.  */
7346       if (len + pos > GET_MODE_PRECISION (inner_mode))
7347 	return x;
7348 
7349       if (BITS_BIG_ENDIAN)
7350 	pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7351 
7352       break;
7353 
7354     default:
7355       return x;
7356     }
7357 
7358   /* We've rejected non-scalar operations by now.  */
7359   scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7360 
7361   /* Convert sign extension to zero extension, if we know that the high
7362      bit is not set, as this is easier to optimize.  It will be converted
7363      back to cheaper alternative in make_extraction.  */
7364   if (GET_CODE (x) == SIGN_EXTEND
7365       && HWI_COMPUTABLE_MODE_P (mode)
7366       && ((nonzero_bits (XEXP (x, 0), inner_mode)
7367 	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7368 	  == 0))
7369     {
7370       rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7371       rtx temp2 = expand_compound_operation (temp);
7372 
7373       /* Make sure this is a profitable operation.  */
7374       if (set_src_cost (x, mode, optimize_this_for_speed_p)
7375           > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7376        return temp2;
7377       else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7378                > set_src_cost (temp, mode, optimize_this_for_speed_p))
7379        return temp;
7380       else
7381        return x;
7382     }
7383 
7384   /* We can optimize some special cases of ZERO_EXTEND.  */
7385   if (GET_CODE (x) == ZERO_EXTEND)
7386     {
7387       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7388 	 know that the last value didn't have any inappropriate bits
7389 	 set.  */
7390       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7391 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7392 	  && HWI_COMPUTABLE_MODE_P (mode)
7393 	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7394 	      & ~GET_MODE_MASK (inner_mode)) == 0)
7395 	return XEXP (XEXP (x, 0), 0);
7396 
7397       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
7398       if (GET_CODE (XEXP (x, 0)) == SUBREG
7399 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7400 	  && subreg_lowpart_p (XEXP (x, 0))
7401 	  && HWI_COMPUTABLE_MODE_P (mode)
7402 	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7403 	      & ~GET_MODE_MASK (inner_mode)) == 0)
7404 	return SUBREG_REG (XEXP (x, 0));
7405 
7406       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7407 	 is a comparison and STORE_FLAG_VALUE permits.  This is like
7408 	 the first case, but it works even when MODE is larger
7409 	 than HOST_WIDE_INT.  */
7410       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7411 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7412 	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7413 	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7414 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7415 	return XEXP (XEXP (x, 0), 0);
7416 
7417       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
7418       if (GET_CODE (XEXP (x, 0)) == SUBREG
7419 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7420 	  && subreg_lowpart_p (XEXP (x, 0))
7421 	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7422 	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7423 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7424 	return SUBREG_REG (XEXP (x, 0));
7425 
7426     }
7427 
7428   /* If we reach here, we want to return a pair of shifts.  The inner
7429      shift is a left shift of BITSIZE - POS - LEN bits.  The outer
7430      shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
7431      logical depending on the value of UNSIGNEDP.
7432 
7433      If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7434      converted into an AND of a shift.
7435 
7436      We must check for the case where the left shift would have a negative
7437      count.  This can happen in a case like (x >> 31) & 255 on machines
7438      that can't shift by a constant.  On those machines, we would first
7439      combine the shift with the AND to produce a variable-position
7440      extraction.  Then the constant of 31 would be substituted in
7441      to produce such a position.  */
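  /* As a concrete sketch, on a 32-bit SImode value a signed extraction of
     LEN = 8 bits starting at POS = 4 becomes

	(ashiftrt:SI (ashift:SI X (const_int 20)) (const_int 24))

     while an unsigned extraction uses LSHIFTRT for the outer shift, or
     falls back to the LSHIFTRT/AND form handled below.  */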
7442 
7443   modewidth = GET_MODE_PRECISION (mode);
7444   if (modewidth >= pos + len)
7445     {
7446       tem = gen_lowpart (mode, XEXP (x, 0));
7447       if (!tem || GET_CODE (tem) == CLOBBER)
7448 	return x;
7449       tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7450 				  tem, modewidth - pos - len);
7451       tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7452 				  mode, tem, modewidth - len);
7453     }
7454   else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7455     tem = simplify_and_const_int (NULL_RTX, mode,
7456 				  simplify_shift_const (NULL_RTX, LSHIFTRT,
7457 							mode, XEXP (x, 0),
7458 							pos),
7459 				  (HOST_WIDE_INT_1U << len) - 1);
7460   else
7461     /* Any other cases we can't handle.  */
7462     return x;
7463 
7464   /* If we couldn't do this for some reason, return the original
7465      expression.  */
7466   if (GET_CODE (tem) == CLOBBER)
7467     return x;
7468 
7469   return tem;
7470 }
7471 
7472 /* X is a SET which contains an assignment of one object into
7473    a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7474    or certain SUBREGS). If possible, convert it into a series of
7475    logical operations.
7476 
7477    We half-heartedly support variable positions, but do not at all
7478    support variable lengths.  */
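/* An illustrative sketch (hypothetical operands, little-endian bit
   numbering): storing SRC into a field of LEN = 8 bits at POS = 8 of an
   SImode register D, i.e.
	(set (zero_extract:SI D (const_int 8) (const_int 8)) SRC)
   becomes, conceptually,
	(set D (ior:SI (and:SI D (const_int -65281))
		       (ashift:SI (and:SI SRC (const_int 255))
				  (const_int 8))))
   where -65281 is ~(0xff << 8): the old field is cleared and the low bits
   of SRC are shifted into place.  */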
7479 
7480 static const_rtx
7481 expand_field_assignment (const_rtx x)
7482 {
7483   rtx inner;
7484   rtx pos;			/* Always counts from low bit.  */
7485   int len, inner_len;
7486   rtx mask, cleared, masked;
7487   scalar_int_mode compute_mode;
7488 
7489   /* Loop until we find something we can't simplify.  */
7490   while (1)
7491     {
7492       if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7493 	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7494 	{
7495 	  rtx x0 = XEXP (SET_DEST (x), 0);
7496 	  if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7497 	    break;
7498 	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7499 	  pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7500 			      MAX_MODE_INT);
7501 	}
7502       else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7503 	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7504 	{
7505 	  inner = XEXP (SET_DEST (x), 0);
7506 	  if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7507 	    break;
7508 
7509 	  len = INTVAL (XEXP (SET_DEST (x), 1));
7510 	  pos = XEXP (SET_DEST (x), 2);
7511 
7512 	  /* A constant position should stay within the width of INNER.  */
7513 	  if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7514 	    break;
7515 
7516 	  if (BITS_BIG_ENDIAN)
7517 	    {
7518 	      if (CONST_INT_P (pos))
7519 		pos = GEN_INT (inner_len - len - INTVAL (pos));
7520 	      else if (GET_CODE (pos) == MINUS
7521 		       && CONST_INT_P (XEXP (pos, 1))
7522 		       && INTVAL (XEXP (pos, 1)) == inner_len - len)
7523 		/* If position is ADJUST - X, new position is X.  */
7524 		pos = XEXP (pos, 0);
7525 	      else
7526 		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7527 					   gen_int_mode (inner_len - len,
7528 							 GET_MODE (pos)),
7529 					   pos);
7530 	    }
7531 	}
7532 
7533       /* If the destination is a subreg that overwrites the whole of the inner
7534 	 register, we can move the subreg to the source.  */
7535       else if (GET_CODE (SET_DEST (x)) == SUBREG
7536 	       /* We need SUBREGs to compute nonzero_bits properly.  */
7537 	       && nonzero_sign_valid
7538 	       && !read_modify_subreg_p (SET_DEST (x)))
7539 	{
7540 	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7541 			   gen_lowpart
7542 			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
7543 			    SET_SRC (x)));
7544 	  continue;
7545 	}
7546       else
7547 	break;
7548 
7549       while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7550 	inner = SUBREG_REG (inner);
7551 
7552       /* Don't attempt bitwise arithmetic on non-scalar integer modes.  */
7553       if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7554 	{
7555 	  /* Don't do anything for vector or complex integral types.  */
7556 	  if (! FLOAT_MODE_P (GET_MODE (inner)))
7557 	    break;
7558 
7559 	  /* Try to find an integral mode to pun with.  */
7560 	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7561 	      .exists (&compute_mode))
7562 	    break;
7563 
7564 	  inner = gen_lowpart (compute_mode, inner);
7565 	}
7566 
7567       /* Compute a mask of LEN bits, if we can do this on the host machine.  */
7568       if (len >= HOST_BITS_PER_WIDE_INT)
7569 	break;
7570 
7571       /* Don't try to compute in too wide unsupported modes.  */
7572       if (!targetm.scalar_mode_supported_p (compute_mode))
7573 	break;
7574 
7575       /* Now compute the equivalent expression.  Make a copy of INNER
7576 	 for the SET_DEST in case it is a MEM into which we will substitute;
7577 	 we don't want shared RTL in that case.  */
7578       mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7579 			   compute_mode);
7580       cleared = simplify_gen_binary (AND, compute_mode,
7581 				     simplify_gen_unary (NOT, compute_mode,
7582 				       simplify_gen_binary (ASHIFT,
7583 							    compute_mode,
7584 							    mask, pos),
7585 				       compute_mode),
7586 				     inner);
7587       masked = simplify_gen_binary (ASHIFT, compute_mode,
7588 				    simplify_gen_binary (
7589 				      AND, compute_mode,
7590 				      gen_lowpart (compute_mode, SET_SRC (x)),
7591 				      mask),
7592 				    pos);
7593 
7594       x = gen_rtx_SET (copy_rtx (inner),
7595 		       simplify_gen_binary (IOR, compute_mode,
7596 					    cleared, masked));
7597     }
7598 
7599   return x;
7600 }
7601 
7602 /* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
7603    it is an RTX that represents the (variable) starting position; otherwise,
7604    POS is the (constant) starting bit position.  Both are counted from the LSB.
7605 
7606    UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7607 
7608    IN_DEST is nonzero if this is a reference in the destination of a SET.
7609    This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
7610    a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7611    be used.
7612 
7613    IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
7614    ZERO_EXTRACT should be built even for bits starting at bit 0.
7615 
7616    MODE is the desired mode of the result (if IN_DEST == 0).
7617 
7618    The result is an RTX for the extraction or NULL_RTX if the target
7619    can't handle it.  */
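/* For example (an illustrative sketch): asking for LEN = 8 unsigned bits at
   POS = 16 of a 32-bit register R, with IN_DEST and IN_COMPARE both zero,
   typically produces something like
	(zero_extract:SI (reg:SI R) (const_int 8) (const_int 16))
   modulo the mode and position adjustments made below, or a cheaper
   SUBREG/ZERO_EXTEND form when the field can be accessed directly.  */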
7620 
7621 static rtx
7622 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7623 		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7624 		 int in_dest, int in_compare)
7625 {
7626   /* This mode describes the size of the storage area
7627      to fetch the overall value from.  Within that, we
7628      ignore the POS lowest bits, etc.  */
7629   machine_mode is_mode = GET_MODE (inner);
7630   machine_mode inner_mode;
7631   scalar_int_mode wanted_inner_mode;
7632   scalar_int_mode wanted_inner_reg_mode = word_mode;
7633   scalar_int_mode pos_mode = word_mode;
7634   machine_mode extraction_mode = word_mode;
7635   rtx new_rtx = 0;
7636   rtx orig_pos_rtx = pos_rtx;
7637   HOST_WIDE_INT orig_pos;
7638 
7639   if (pos_rtx && CONST_INT_P (pos_rtx))
7640     pos = INTVAL (pos_rtx), pos_rtx = 0;
7641 
7642   if (GET_CODE (inner) == SUBREG
7643       && subreg_lowpart_p (inner)
7644       && (paradoxical_subreg_p (inner)
7645 	  /* If trying or potentially trying to extract
7646 	     bits outside of is_mode, don't look through
7647 	     non-paradoxical SUBREGs.  See PR82192.  */
7648 	  || (pos_rtx == NULL_RTX
7649 	      && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7650     {
7651       /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7652 	 consider just the QI as the memory to extract from.
7653 	 The subreg adds or removes high bits; its mode is
7654 	 irrelevant to the meaning of this extraction,
7655 	 since POS and LEN count from the lsb.  */
7656       if (MEM_P (SUBREG_REG (inner)))
7657 	is_mode = GET_MODE (SUBREG_REG (inner));
7658       inner = SUBREG_REG (inner);
7659     }
7660   else if (GET_CODE (inner) == ASHIFT
7661 	   && CONST_INT_P (XEXP (inner, 1))
7662 	   && pos_rtx == 0 && pos == 0
7663 	   && len > UINTVAL (XEXP (inner, 1)))
7664     {
7665       /* We're extracting the least significant bits of an rtx
7666 	 (ashift X (const_int C)), where LEN > C.  Extract the
7667 	 least significant (LEN - C) bits of X, giving an rtx
7668 	 whose mode is MODE, then shift it left C times.  */
7669       new_rtx = make_extraction (mode, XEXP (inner, 0),
7670 			     0, 0, len - INTVAL (XEXP (inner, 1)),
7671 			     unsignedp, in_dest, in_compare);
7672       if (new_rtx != 0)
7673 	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7674     }
7675   else if (GET_CODE (inner) == TRUNCATE
7676 	   /* If trying or potentially trying to extract
7677 	      bits outside of is_mode, don't look through
7678 	      TRUNCATE.  See PR82192.  */
7679 	   && pos_rtx == NULL_RTX
7680 	   && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7681     inner = XEXP (inner, 0);
7682 
7683   inner_mode = GET_MODE (inner);
7684 
7685   /* See if this can be done without an extraction.  We never can if the
7686      width of the field is not the same as that of some integer mode. For
7687      registers, we can only avoid the extraction if the position is at the
7688      low-order bit and this is either not in the destination or we have the
7689      appropriate STRICT_LOW_PART operation available.
7690 
7691      For MEM, we can avoid an extract if the field starts on an appropriate
7692      boundary and we can change the mode of the memory reference.  */
7693 
7694   scalar_int_mode tmode;
7695   if (int_mode_for_size (len, 1).exists (&tmode)
7696       && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7697 	   && !MEM_P (inner)
7698 	   && (pos == 0 || REG_P (inner))
7699 	   && (inner_mode == tmode
7700 	       || !REG_P (inner)
7701 	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7702 	       || reg_truncated_to_mode (tmode, inner))
7703 	   && (! in_dest
7704 	       || (REG_P (inner)
7705 		   && have_insn_for (STRICT_LOW_PART, tmode))))
7706 	  || (MEM_P (inner) && pos_rtx == 0
7707 	      && (pos
7708 		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7709 		     : BITS_PER_UNIT)) == 0
7710 	      /* We can't do this if we are widening INNER_MODE (it
7711 		 may not be aligned, for one thing).  */
7712 	      && !paradoxical_subreg_p (tmode, inner_mode)
7713 	      && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7714 	      && (inner_mode == tmode
7715 		  || (! mode_dependent_address_p (XEXP (inner, 0),
7716 						  MEM_ADDR_SPACE (inner))
7717 		      && ! MEM_VOLATILE_P (inner))))))
7718     {
7719       /* If INNER is a MEM, make a new MEM that encompasses just the desired
7720 	 field.  If the original and current mode are the same, we need not
7721 	 adjust the offset.  Otherwise, we do if bytes big endian.
7722 
7723 	 If INNER is not a MEM, get a piece consisting of just the field
7724 	 of interest (in this case POS % BITS_PER_WORD must be 0).  */
7725 
7726       if (MEM_P (inner))
7727 	{
7728 	  poly_int64 offset;
7729 
7730 	  /* POS counts from lsb, but make OFFSET count in memory order.  */
7731 	  if (BYTES_BIG_ENDIAN)
7732 	    offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7733 					       - len - pos);
7734 	  else
7735 	    offset = pos / BITS_PER_UNIT;
7736 
7737 	  new_rtx = adjust_address_nv (inner, tmode, offset);
7738 	}
7739       else if (REG_P (inner))
7740 	{
7741 	  if (tmode != inner_mode)
7742 	    {
7743 	      /* We can't call gen_lowpart in a DEST since we
7744 		 always want a SUBREG (see below) and it would sometimes
7745 		 return a new hard register.  */
7746 	      if (pos || in_dest)
7747 		{
7748 		  poly_uint64 offset
7749 		    = subreg_offset_from_lsb (tmode, inner_mode, pos);
7750 
7751 		  /* Avoid creating invalid subregs, for example when
7752 		     simplifying (x>>32)&255.  */
7753 		  if (!validate_subreg (tmode, inner_mode, inner, offset))
7754 		    return NULL_RTX;
7755 
7756 		  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7757 		}
7758 	      else
7759 		new_rtx = gen_lowpart (tmode, inner);
7760 	    }
7761 	  else
7762 	    new_rtx = inner;
7763 	}
7764       else
7765 	new_rtx = force_to_mode (inner, tmode,
7766 				 len >= HOST_BITS_PER_WIDE_INT
7767 				 ? HOST_WIDE_INT_M1U
7768 				 : (HOST_WIDE_INT_1U << len) - 1, 0);
7769 
7770       /* If this extraction is going into the destination of a SET,
7771 	 make a STRICT_LOW_PART unless we made a MEM.  */
7772 
7773       if (in_dest)
7774 	return (MEM_P (new_rtx) ? new_rtx
7775 		: (GET_CODE (new_rtx) != SUBREG
7776 		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
7777 		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7778 
7779       if (mode == tmode)
7780 	return new_rtx;
7781 
7782       if (CONST_SCALAR_INT_P (new_rtx))
7783 	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7784 					 mode, new_rtx, tmode);
7785 
7786       /* If we know that no extraneous bits are set, and that the high
7787 	 bit is not set, convert the extraction to the cheaper of
7788 	 sign and zero extension, which are equivalent in these cases.  */
7789       if (flag_expensive_optimizations
7790 	  && (HWI_COMPUTABLE_MODE_P (tmode)
7791 	      && ((nonzero_bits (new_rtx, tmode)
7792 		   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7793 		  == 0)))
7794 	{
7795 	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7796 	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7797 
7798 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7799 	     backends.  */
7800 	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7801 	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7802 	    return temp;
7803 	  return temp1;
7804 	}
7805 
7806       /* Otherwise, sign- or zero-extend unless we already are in the
7807 	 proper mode.  */
7808 
7809       return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7810 			     mode, new_rtx));
7811     }
7812 
7813   /* Unless this is a COMPARE or we have a funny memory reference,
7814      don't do anything with zero-extending field extracts starting at
7815      the low-order bit since they are simple AND operations.  */
7816   if (pos_rtx == 0 && pos == 0 && ! in_dest
7817       && ! in_compare && unsignedp)
7818     return 0;
7819 
7820   /* If INNER is a MEM, reject this if we would be spanning bytes or
7821      if the position is not a constant and the length is not 1.  In all
7822      other cases, we would only be going outside our object in cases when
7823      an original shift would have been undefined.  */
7824   if (MEM_P (inner)
7825       && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7826 	  || (pos_rtx != 0 && len != 1)))
7827     return 0;
7828 
7829   enum extraction_pattern pattern = (in_dest ? EP_insv
7830 				     : unsignedp ? EP_extzv : EP_extv);
7831 
7832   /* If INNER is not from memory, we want it to have the mode of a register
7833      extraction pattern's structure operand, or word_mode if there is no
7834      such pattern.  The same applies to extraction_mode and pos_mode
7835      and their respective operands.
7836 
7837      For memory, assume that the desired extraction_mode and pos_mode
7838      are the same as for a register operation, since at present we don't
7839      have named patterns for aligned memory structures.  */
7840   struct extraction_insn insn;
7841   unsigned int inner_size;
7842   if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7843       && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7844     {
7845       wanted_inner_reg_mode = insn.struct_mode.require ();
7846       pos_mode = insn.pos_mode;
7847       extraction_mode = insn.field_mode;
7848     }
7849 
7850   /* Never narrow an object, since that might not be safe.  */
7851 
7852   if (mode != VOIDmode
7853       && partial_subreg_p (extraction_mode, mode))
7854     extraction_mode = mode;
7855 
7856   /* Punt if len is too large for extraction_mode.  */
7857   if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7858     return NULL_RTX;
7859 
7860   if (!MEM_P (inner))
7861     wanted_inner_mode = wanted_inner_reg_mode;
7862   else
7863     {
7864       /* Be careful not to go beyond the extracted object and maintain the
7865 	 natural alignment of the memory.  */
7866       wanted_inner_mode = smallest_int_mode_for_size (len);
7867       while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7868 	     > GET_MODE_BITSIZE (wanted_inner_mode))
7869 	wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7870     }
7871 
7872   orig_pos = pos;
7873 
7874   if (BITS_BIG_ENDIAN)
7875     {
7876       /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7877 	 BITS_BIG_ENDIAN style.  If position is constant, compute new
7878 	 position.  Otherwise, build subtraction.
7879 	 Note that POS is relative to the mode of the original argument.
7880 	 If it's a MEM we need to recompute POS relative to that.
7881 	 However, if we're extracting from (or inserting into) a register,
7882 	 we want to recompute POS relative to wanted_inner_mode.  */
7883       int width;
7884       if (!MEM_P (inner))
7885 	width = GET_MODE_BITSIZE (wanted_inner_mode);
7886       else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7887 	return NULL_RTX;
7888 
7889       if (pos_rtx == 0)
7890 	pos = width - len - pos;
7891       else
7892 	pos_rtx
7893 	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
7894 			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
7895 			   pos_rtx);
7896       /* POS may be less than 0 now, but we check for that below.
7897 	 Note that it can only be less than 0 if !MEM_P (inner).  */
7898     }
7899 
7900   /* If INNER has a wider mode, and this is a constant extraction, try to
7901      make it smaller and adjust the byte to point to the byte containing
7902      the value.  */
7903   if (wanted_inner_mode != VOIDmode
7904       && inner_mode != wanted_inner_mode
7905       && ! pos_rtx
7906       && partial_subreg_p (wanted_inner_mode, is_mode)
7907       && MEM_P (inner)
7908       && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7909       && ! MEM_VOLATILE_P (inner))
7910     {
7911       poly_int64 offset = 0;
7912 
7913       /* The computations below will be correct if the machine is big
7914 	 endian in both bits and bytes or little endian in bits and bytes.
7915 	 If it is mixed, we must adjust.  */
7916 
7917       /* If bytes are big endian and we had a paradoxical SUBREG, we must
7918 	 adjust OFFSET to compensate.  */
7919       if (BYTES_BIG_ENDIAN
7920 	  && paradoxical_subreg_p (is_mode, inner_mode))
7921 	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7922 
7923       /* We can now move to the desired byte.  */
7924       offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7925 		* GET_MODE_SIZE (wanted_inner_mode);
7926       pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7927 
7928       if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7929 	  && is_mode != wanted_inner_mode)
7930 	offset = (GET_MODE_SIZE (is_mode)
7931 		  - GET_MODE_SIZE (wanted_inner_mode) - offset);
7932 
7933       inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7934     }
7935 
7936   /* If INNER is not memory, get it into the proper mode.  If we are changing
7937      its mode, POS must be a constant and smaller than the size of the new
7938      mode.  */
7939   else if (!MEM_P (inner))
7940     {
7941       /* On the LHS, don't create paradoxical subregs implicitly truncating
7942 	 the register unless TARGET_TRULY_NOOP_TRUNCATION.  */
7943       if (in_dest
7944 	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7945 					     wanted_inner_mode))
7946 	return NULL_RTX;
7947 
7948       if (GET_MODE (inner) != wanted_inner_mode
7949 	  && (pos_rtx != 0
7950 	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7951 	return NULL_RTX;
7952 
7953       if (orig_pos < 0)
7954 	return NULL_RTX;
7955 
7956       inner = force_to_mode (inner, wanted_inner_mode,
7957 			     pos_rtx
7958 			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7959 			     ? HOST_WIDE_INT_M1U
7960 			     : (((HOST_WIDE_INT_1U << len) - 1)
7961 				<< orig_pos),
7962 			     0);
7963     }
7964 
7965   /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
7966      have to zero extend.  Otherwise, we can just use a SUBREG.
7967 
7968      We dealt with constant rtxes earlier, so pos_rtx cannot
7969      have VOIDmode at this point.  */
7970   if (pos_rtx != 0
7971       && (GET_MODE_SIZE (pos_mode)
7972 	  > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7973     {
7974       rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7975 				     GET_MODE (pos_rtx));
7976 
7977       /* If we know that no extraneous bits are set, and that the high
7978 	 bit is not set, convert the extraction to the cheaper of
7979 	 SIGN_EXTENSION and ZERO_EXTENSION, which are equivalent in these
7980 	 cases.  */
7981       if (flag_expensive_optimizations
7982 	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7983 	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7984 		   & ~(((unsigned HOST_WIDE_INT)
7985 			GET_MODE_MASK (GET_MODE (pos_rtx)))
7986 		       >> 1))
7987 		  == 0)))
7988 	{
7989 	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7990 					  GET_MODE (pos_rtx));
7991 
7992 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7993 	     backends.  */
7994 	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7995 	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7996 	    temp = temp1;
7997 	}
7998       pos_rtx = temp;
7999     }
8000 
8001   /* Make POS_RTX unless we already have it and it is correct.  If we don't
8002      have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
8003      be a CONST_INT.  */
8004   if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
8005     pos_rtx = orig_pos_rtx;
8006 
8007   else if (pos_rtx == 0)
8008     pos_rtx = GEN_INT (pos);
8009 
8010   /* Make the required operation.  See if we can use existing rtx.  */
8011   new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
8012 			 extraction_mode, inner, GEN_INT (len), pos_rtx);
8013   if (! in_dest)
8014     new_rtx = gen_lowpart (mode, new_rtx);
8015 
8016   return new_rtx;
8017 }
8018 
8019 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
8020    can be commuted with any other operations in X.  Return X without
8021    that shift if so.  */
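/* A sketch of the idea: with COUNT == 3,
	(plus:SI (ashift:SI X (const_int 3)) (const_int 8))
   is rewritten as (plus:SI X (const_int 1)); in effect the expression is
   divided by 1 << COUNT, which is only valid because the low COUNT bits of
   the constant are known to be zero.  */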
8022 
8023 static rtx
8024 extract_left_shift (scalar_int_mode mode, rtx x, int count)
8025 {
8026   enum rtx_code code = GET_CODE (x);
8027   rtx tem;
8028 
8029   switch (code)
8030     {
8031     case ASHIFT:
8032       /* This is the shift itself.  If it is wide enough, we will return
8033 	 either the value being shifted, if the shift count is equal to
8034 	 COUNT, or a shift by the difference.  */
8035       if (CONST_INT_P (XEXP (x, 1))
8036 	  && INTVAL (XEXP (x, 1)) >= count)
8037 	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
8038 				     INTVAL (XEXP (x, 1)) - count);
8039       break;
8040 
8041     case NEG:  case NOT:
8042       if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8043 	return simplify_gen_unary (code, mode, tem, mode);
8044 
8045       break;
8046 
8047     case PLUS:  case IOR:  case XOR:  case AND:
8048       /* If we can safely shift this constant and we find the inner shift,
8049 	 make a new operation.  */
8050       if (CONST_INT_P (XEXP (x, 1))
8051 	  && (UINTVAL (XEXP (x, 1))
8052 	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
8053 	  && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8054 	{
8055 	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
8056 	  return simplify_gen_binary (code, mode, tem,
8057 				      gen_int_mode (val, mode));
8058 	}
8059       break;
8060 
8061     default:
8062       break;
8063     }
8064 
8065   return 0;
8066 }
8067 
8068 /* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
8069    level of the expression and MODE is its mode.  IN_CODE is as for
8070    make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
8071    that should be used when recursing on operands of *X_PTR.
8072 
8073    There are two possible actions:
8074 
8075    - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
8076      equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
8077 
8078    - Return a new rtx, which the caller returns directly.  */
8079 
8080 static rtx
8081 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
8082 			     enum rtx_code in_code,
8083 			     enum rtx_code *next_code_ptr)
8084 {
8085   rtx x = *x_ptr;
8086   enum rtx_code next_code = *next_code_ptr;
8087   enum rtx_code code = GET_CODE (x);
8088   int mode_width = GET_MODE_PRECISION (mode);
8089   rtx rhs, lhs;
8090   rtx new_rtx = 0;
8091   int i;
8092   rtx tem;
8093   scalar_int_mode inner_mode;
8094   bool equality_comparison = false;
8095 
8096   if (in_code == EQ)
8097     {
8098       equality_comparison = true;
8099       in_code = COMPARE;
8100     }
8101 
8102   /* Process depending on the code of this operation.  If NEW is set
8103      nonzero, it will be returned.  */
8104 
8105   switch (code)
8106     {
8107     case ASHIFT:
8108       /* Convert shifts by constants into multiplications if inside
8109 	 an address.  */
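      /* E.g. (a sketch): inside a MEM, (ashift:SI X (const_int 2)) becomes
	 (mult:SI X (const_int 4)), the canonical form for address
	 arithmetic.  */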
8110       if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
8111 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8112 	  && INTVAL (XEXP (x, 1)) >= 0)
8113 	{
8114 	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8115 	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8116 
8117 	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8118 	  if (GET_CODE (new_rtx) == NEG)
8119 	    {
8120 	      new_rtx = XEXP (new_rtx, 0);
8121 	      multval = -multval;
8122 	    }
8123 	  multval = trunc_int_for_mode (multval, mode);
8124 	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8125 	}
8126       break;
8127 
8128     case PLUS:
8129       lhs = XEXP (x, 0);
8130       rhs = XEXP (x, 1);
8131       lhs = make_compound_operation (lhs, next_code);
8132       rhs = make_compound_operation (rhs, next_code);
8133       if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8134 	{
8135 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8136 				     XEXP (lhs, 1));
8137 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8138 	}
8139       else if (GET_CODE (lhs) == MULT
8140 	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8141 	{
8142 	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8143 				     simplify_gen_unary (NEG, mode,
8144 							 XEXP (lhs, 1),
8145 							 mode));
8146 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8147 	}
8148       else
8149 	{
8150 	  SUBST (XEXP (x, 0), lhs);
8151 	  SUBST (XEXP (x, 1), rhs);
8152 	}
8153       maybe_swap_commutative_operands (x);
8154       return x;
8155 
8156     case MINUS:
8157       lhs = XEXP (x, 0);
8158       rhs = XEXP (x, 1);
8159       lhs = make_compound_operation (lhs, next_code);
8160       rhs = make_compound_operation (rhs, next_code);
8161       if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8162 	{
8163 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8164 				     XEXP (rhs, 1));
8165 	  return simplify_gen_binary (PLUS, mode, tem, lhs);
8166 	}
8167       else if (GET_CODE (rhs) == MULT
8168 	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8169 	{
8170 	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8171 				     simplify_gen_unary (NEG, mode,
8172 							 XEXP (rhs, 1),
8173 							 mode));
8174 	  return simplify_gen_binary (PLUS, mode, tem, lhs);
8175 	}
8176       else
8177 	{
8178 	  SUBST (XEXP (x, 0), lhs);
8179 	  SUBST (XEXP (x, 1), rhs);
8180 	  return x;
8181 	}
8182 
8183     case AND:
8184       /* If the second operand is not a constant, we can't do anything
8185 	 with it.  */
8186       if (!CONST_INT_P (XEXP (x, 1)))
8187 	break;
8188 
8189       /* If the constant is a power of two minus one and the first operand
8190 	 is a logical right shift, make an extraction.  */
8191       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8192 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8193 	{
8194 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8195 	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8196 				     i, 1, 0, in_code == COMPARE);
8197 	}
8198 
8199       /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
8200       else if (GET_CODE (XEXP (x, 0)) == SUBREG
8201 	       && subreg_lowpart_p (XEXP (x, 0))
8202 	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8203 					  &inner_mode)
8204 	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8205 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8206 	{
8207 	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8208 	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8209 	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
8210 				     XEXP (inner_x0, 1),
8211 				     i, 1, 0, in_code == COMPARE);
8212 
8213 	  /* If we narrowed the mode when dropping the subreg, then we lose.  */
8214 	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8215 	    new_rtx = NULL;
8216 
8217 	  /* If that didn't give anything, see if the AND simplifies on
8218 	     its own.  */
8219 	  if (!new_rtx && i >= 0)
8220 	    {
8221 	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8222 	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8223 					 0, in_code == COMPARE);
8224 	    }
8225 	}
8226       /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
8227       else if ((GET_CODE (XEXP (x, 0)) == XOR
8228 		|| GET_CODE (XEXP (x, 0)) == IOR)
8229 	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8230 	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8231 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8232 	{
8233 	  /* Apply the distributive law, and then try to make extractions.  */
8234 	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8235 				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8236 						 XEXP (x, 1)),
8237 				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8238 						 XEXP (x, 1)));
8239 	  new_rtx = make_compound_operation (new_rtx, in_code);
8240 	}
8241 
8242       /* If we have (and (rotate X C) M) and C is larger than the number
8243 	 of bits in M, this is an extraction.  */
8244 
8245       else if (GET_CODE (XEXP (x, 0)) == ROTATE
8246 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8247 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8248 	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8249 	{
8250 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8251 	  new_rtx = make_extraction (mode, new_rtx,
8252 				     (GET_MODE_PRECISION (mode)
8253 				      - INTVAL (XEXP (XEXP (x, 0), 1))),
8254 				     NULL_RTX, i, 1, 0, in_code == COMPARE);
8255 	}
8256 
8257       /* On machines without logical shifts, if the operand of the AND is
8258 	 a logical shift and our mask turns off all the propagated sign
8259 	 bits, we can replace the logical shift with an arithmetic shift.  */
8260       else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8261 	       && !have_insn_for (LSHIFTRT, mode)
8262 	       && have_insn_for (ASHIFTRT, mode)
8263 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8264 	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8265 	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8266 	       && mode_width <= HOST_BITS_PER_WIDE_INT)
8267 	{
8268 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8269 
8270 	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8271 	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8272 	    SUBST (XEXP (x, 0),
8273 		   gen_rtx_ASHIFTRT (mode,
8274 				     make_compound_operation (XEXP (XEXP (x,
8275 									  0),
8276 								    0),
8277 							      next_code),
8278 				     XEXP (XEXP (x, 0), 1)));
8279 	}
8280 
8281       /* If the constant is one less than a power of two, this might be
8282 	 representable by an extraction even if no shift is present.
8283 	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8284 	 we are in a COMPARE.  */
8285       else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8286 	new_rtx = make_extraction (mode,
8287 				   make_compound_operation (XEXP (x, 0),
8288 							    next_code),
8289 				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8290 
8291       /* If we are in a comparison and this is an AND with a power of two,
8292 	 convert this into the appropriate bit extract.  */
8293       else if (in_code == COMPARE
8294 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8295 	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8296 	new_rtx = make_extraction (mode,
8297 				   make_compound_operation (XEXP (x, 0),
8298 							    next_code),
8299 				   i, NULL_RTX, 1, 1, 0, 1);
8300 
8301       /* If one operand is a paradoxical subreg of a register or memory and
8302 	 the constant (limited to the smaller mode) has only zero bits where
8303 	 the sub expression has known zero bits, this can be expressed as
8304 	 a zero_extend.  */
8305       else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8306 	{
8307 	  rtx sub;
8308 
8309 	  sub = XEXP (XEXP (x, 0), 0);
8310 	  machine_mode sub_mode = GET_MODE (sub);
8311 	  int sub_width;
8312 	  if ((REG_P (sub) || MEM_P (sub))
8313 	      && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8314 	      && sub_width < mode_width)
8315 	    {
8316 	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8317 	      unsigned HOST_WIDE_INT mask;
8318 
8319 	      /* original AND constant with all the known zero bits set */
8320 	      /* Original AND constant with all the known zero bits set.  */
8321 	      if ((mask & mode_mask) == mode_mask)
8322 		{
8323 		  new_rtx = make_compound_operation (sub, next_code);
8324 		  new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8325 					     1, 0, in_code == COMPARE);
8326 		}
8327 	    }
8328 	}
8329 
8330       break;
8331 
8332     case LSHIFTRT:
8333       /* If the sign bit is known to be zero, replace this with an
8334 	 arithmetic shift.  */
8335       if (have_insn_for (ASHIFTRT, mode)
8336 	  && ! have_insn_for (LSHIFTRT, mode)
8337 	  && mode_width <= HOST_BITS_PER_WIDE_INT
8338 	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8339 	  && (nonzero_bits (XEXP (x, 0), mode) & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0)
8340 	  new_rtx = gen_rtx_ASHIFTRT (mode,
8341 				      make_compound_operation (XEXP (x, 0),
8342 							       next_code),
8343 				      XEXP (x, 1));
8344 	  break;
8345 	}
8346 
8347       /* fall through */
8348 
8349     case ASHIFTRT:
8350       lhs = XEXP (x, 0);
8351       rhs = XEXP (x, 1);
8352 
8353       /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8354 	 this is a SIGN_EXTRACT.  */
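      /* For instance (a sketch, in SImode):
	    (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 26))
	 extracts 32 - 26 = 6 bits starting at bit 26 - 24 = 2, i.e.
	    (sign_extract:SI X (const_int 6) (const_int 2)).  */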
8355       if (CONST_INT_P (rhs)
8356 	  && GET_CODE (lhs) == ASHIFT
8357 	  && CONST_INT_P (XEXP (lhs, 1))
8358 	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8359 	  && INTVAL (XEXP (lhs, 1)) >= 0
8360 	  && INTVAL (rhs) < mode_width)
8361 	{
8362 	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8363 	  new_rtx = make_extraction (mode, new_rtx,
8364 				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8365 				     NULL_RTX, mode_width - INTVAL (rhs),
8366 				     code == LSHIFTRT, 0, in_code == COMPARE);
8367 	  break;
8368 	}
8369 
8370       /* See if we have operations between an ASHIFTRT and an ASHIFT.
8371 	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
8372 	 also do this for some cases of SIGN_EXTRACT, but it doesn't
8373 	 seem worth the effort; the case checked for occurs on Alpha.  */
8374 
8375       if (!OBJECT_P (lhs)
8376 	  && ! (GET_CODE (lhs) == SUBREG
8377 		&& (OBJECT_P (SUBREG_REG (lhs))))
8378 	  && CONST_INT_P (rhs)
8379 	  && INTVAL (rhs) >= 0
8380 	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8381 	  && INTVAL (rhs) < mode_width
8382 	  && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8383 	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8384 								  next_code),
8385 				   0, NULL_RTX, mode_width - INTVAL (rhs),
8386 				   code == LSHIFTRT, 0, in_code == COMPARE);
8387 
8388       break;
8389 
8390     case SUBREG:
8391       /* Call ourselves recursively on the inner expression.  If we are
8392 	 narrowing the object and it has a different RTL code from
8393 	 the one it originally had, do this SUBREG as a force_to_mode.  */
8394       {
8395 	rtx inner = SUBREG_REG (x), simplified;
8396 	enum rtx_code subreg_code = in_code;
8397 
8398 	/* If the SUBREG is masking of a logical right shift,
8399 	   make an extraction.  */
8400 	if (GET_CODE (inner) == LSHIFTRT
8401 	    && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8402 	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8403 	    && CONST_INT_P (XEXP (inner, 1))
8404 	    && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8405 	    && subreg_lowpart_p (x))
8406 	  {
8407 	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8408 	    int width = GET_MODE_PRECISION (inner_mode)
8409 			- INTVAL (XEXP (inner, 1));
8410 	    if (width > mode_width)
8411 	      width = mode_width;
8412 	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8413 				       width, 1, 0, in_code == COMPARE);
8414 	    break;
8415 	  }
8416 
8417 	/* If in_code is COMPARE, it isn't always safe to pass it through
8418 	   to the recursive make_compound_operation call.  */
8419 	if (subreg_code == COMPARE
8420 	    && (!subreg_lowpart_p (x)
8421 		|| GET_CODE (inner) == SUBREG
8422 		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8423 		   is (const_int 0), rather than
8424 		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8425 		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8426 		   for non-equality comparisons against 0 is not equivalent
8427 		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
8428 		|| (GET_CODE (inner) == AND
8429 		    && CONST_INT_P (XEXP (inner, 1))
8430 		    && partial_subreg_p (x)
8431 		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
8432 		       >= GET_MODE_BITSIZE (mode) - 1)))
8433 	  subreg_code = SET;
8434 
8435 	tem = make_compound_operation (inner, subreg_code);
8436 
8437 	simplified
8438 	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8439 	if (simplified)
8440 	  tem = simplified;
8441 
8442 	if (GET_CODE (tem) != GET_CODE (inner)
8443 	    && partial_subreg_p (x)
8444 	    && subreg_lowpart_p (x))
8445 	  {
8446 	    rtx newer
8447 	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8448 
8449 	    /* If we have something other than a SUBREG, we might have
8450 	       done an expansion, so rerun ourselves.  */
8451 	    if (GET_CODE (newer) != SUBREG)
8452 	      newer = make_compound_operation (newer, in_code);
8453 
8454 	    /* force_to_mode can expand compounds.  If it just re-expanded
8455 	       the compound, use gen_lowpart to convert to the desired
8456 	       mode.  */
8457 	    if (rtx_equal_p (newer, x)
8458 		/* Likewise if it re-expanded the compound only partially.
8459 		   This happens for SUBREG of ZERO_EXTRACT if they extract
8460 		   the same number of bits.  */
8461 		|| (GET_CODE (newer) == SUBREG
8462 		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8463 			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8464 		    && GET_CODE (inner) == AND
8465 		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8466 	      return gen_lowpart (GET_MODE (x), tem);
8467 
8468 	    return newer;
8469 	  }
8470 
8471 	if (simplified)
8472 	  return tem;
8473       }
8474       break;
8475 
8476     default:
8477       break;
8478     }
8479 
8480   if (new_rtx)
8481     *x_ptr = gen_lowpart (mode, new_rtx);
8482   *next_code_ptr = next_code;
8483   return NULL_RTX;
8484 }
8485 
8486 /* Look at the expression rooted at X.  Look for expressions
8487    equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8488    Form these expressions.
8489 
8490    Return the new rtx, usually just X.
8491 
8492    Also, for machines like the VAX that don't have logical shift insns,
8493    try to convert logical to arithmetic shift operations in cases where
8494    they are equivalent.  This undoes the canonicalizations to logical
8495    shifts done elsewhere.
8496 
8497    We try, as much as possible, to re-use rtl expressions to save memory.
8498 
8499    IN_CODE says what kind of expression we are processing.  Normally, it is
8500    SET.  In a memory address it is MEM.  When processing the arguments of
8501    a comparison or a COMPARE against zero, it is COMPARE, or EQ if, more
8502    precisely, it is an equality comparison against zero.  */
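/* A small example (an illustrative sketch): in SImode,
	(and:SI (lshiftrt:SI X (const_int 8)) (const_int 255))
   is typically rewritten as
	(zero_extract:SI X (const_int 8) (const_int 8)),
   and (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)) as a
   sign extension of the low byte of X.  */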
8503 
8504 rtx
8505 make_compound_operation (rtx x, enum rtx_code in_code)
8506 {
8507   enum rtx_code code = GET_CODE (x);
8508   const char *fmt;
8509   int i, j;
8510   enum rtx_code next_code;
8511   rtx new_rtx, tem;
8512 
8513   /* Select the code to be used in recursive calls.  Once we are inside an
8514      address, we stay there.  If we have a comparison, set to COMPARE,
8515      but once inside, go back to our default of SET.  */
8516 
8517   next_code = (code == MEM ? MEM
8518 	       : ((code == COMPARE || COMPARISON_P (x))
8519 		  && XEXP (x, 1) == const0_rtx) ? COMPARE
8520 	       : in_code == COMPARE || in_code == EQ ? SET : in_code);
8521 
8522   scalar_int_mode mode;
8523   if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8524     {
8525       rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8526 						 &next_code);
8527       if (new_rtx)
8528 	return new_rtx;
8529       code = GET_CODE (x);
8530     }
8531 
8532   /* Now recursively process each operand of this operation.  We need to
8533      handle ZERO_EXTEND specially so that we don't lose track of the
8534      inner mode.  */
8535   if (code == ZERO_EXTEND)
8536     {
8537       new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8538       tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8539 					    new_rtx, GET_MODE (XEXP (x, 0)));
8540       if (tem)
8541 	return tem;
8542       SUBST (XEXP (x, 0), new_rtx);
8543       return x;
8544     }
8545 
8546   fmt = GET_RTX_FORMAT (code);
8547   for (i = 0; i < GET_RTX_LENGTH (code); i++)
8548     if (fmt[i] == 'e')
8549       {
8550 	new_rtx = make_compound_operation (XEXP (x, i), next_code);
8551 	SUBST (XEXP (x, i), new_rtx);
8552       }
8553     else if (fmt[i] == 'E')
8554       for (j = 0; j < XVECLEN (x, i); j++)
8555 	{
8556 	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8557 	  SUBST (XVECEXP (x, i, j), new_rtx);
8558 	}
8559 
8560   maybe_swap_commutative_operands (x);
8561   return x;
8562 }
8563 
8564 /* Given M, see if it is a value that would select a field of bits
8565    within an item, but not the entire word.  Return -1 if not.
8566    Otherwise, return the starting position of the field, where 0 is the
8567    low-order bit.
8568 
8569    *PLEN is set to the length of the field.  */
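/* Examples (a sketch): M == 0xf0 gives position 4 with *PLEN == 4;
   M == 0x0f gives position 0 with *PLEN == 4; a mask with a hole in it,
   such as 0x0a, does not select a contiguous field and yields -1.  */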
8570 
8571 static int
8572 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8573 {
8574   /* Get the bit number of the first 1 bit from the right, -1 if none.  */
8575   int pos = m ? ctz_hwi (m) : -1;
8576   int len = 0;
8577 
8578   if (pos >= 0)
8579     /* Now shift off the low-order zero bits and see if we have a
8580        power of two minus 1.  */
8581     len = exact_log2 ((m >> pos) + 1);
8582 
8583   if (len <= 0)
8584     pos = -1;
8585 
8586   *plen = len;
8587   return pos;
8588 }
8589 
8590 /* If X refers to a register that equals REG in value, replace these
8591    references with REG.  */
8592 static rtx
8593 canon_reg_for_combine (rtx x, rtx reg)
8594 {
8595   rtx op0, op1, op2;
8596   const char *fmt;
8597   int i;
8598   bool copied;
8599 
8600   enum rtx_code code = GET_CODE (x);
8601   switch (GET_RTX_CLASS (code))
8602     {
8603     case RTX_UNARY:
8604       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8605       if (op0 != XEXP (x, 0))
8606 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8607 				   GET_MODE (reg));
8608       break;
8609 
8610     case RTX_BIN_ARITH:
8611     case RTX_COMM_ARITH:
8612       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8613       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8614       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8615 	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8616       break;
8617 
8618     case RTX_COMPARE:
8619     case RTX_COMM_COMPARE:
8620       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8621       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8622       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8623 	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8624 					GET_MODE (op0), op0, op1);
8625       break;
8626 
8627     case RTX_TERNARY:
8628     case RTX_BITFIELD_OPS:
8629       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8630       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8631       op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8632       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8633 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8634 				     GET_MODE (op0), op0, op1, op2);
8635       /* FALLTHRU */
8636 
8637     case RTX_OBJ:
8638       if (REG_P (x))
8639 	{
8640 	  if (rtx_equal_p (get_last_value (reg), x)
8641 	      || rtx_equal_p (reg, get_last_value (x)))
8642 	    return reg;
8643 	  else
8644 	    break;
8645 	}
8646 
8647       /* fall through */
8648 
8649     default:
8650       fmt = GET_RTX_FORMAT (code);
8651       copied = false;
8652       for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8653 	if (fmt[i] == 'e')
8654 	  {
8655 	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8656 	    if (op != XEXP (x, i))
8657 	      {
8658 		if (!copied)
8659 		  {
8660 		    copied = true;
8661 		    x = copy_rtx (x);
8662 		  }
8663 		XEXP (x, i) = op;
8664 	      }
8665 	  }
8666 	else if (fmt[i] == 'E')
8667 	  {
8668 	    int j;
8669 	    for (j = 0; j < XVECLEN (x, i); j++)
8670 	      {
8671 		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8672 		if (op != XVECEXP (x, i, j))
8673 		  {
8674 		    if (!copied)
8675 		      {
8676 			copied = true;
8677 			x = copy_rtx (x);
8678 		      }
8679 		    XVECEXP (x, i, j) = op;
8680 		  }
8681 	      }
8682 	  }
8683 
8684       break;
8685     }
8686 
8687   return x;
8688 }
8689 
8690 /* Return X converted to MODE.  If the value is already truncated to
8691    MODE we can just return a subreg even though in the general case we
8692    would need an explicit truncation.  */
8693 
8694 static rtx
8695 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8696 {
8697   if (!CONST_INT_P (x)
8698       && partial_subreg_p (mode, GET_MODE (x))
8699       && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8700       && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8701     {
8702       /* Bit-cast X into an integer mode.  */
8703       if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8704 	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8705       x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8706 			      x, GET_MODE (x));
8707     }
8708 
8709   return gen_lowpart (mode, x);
8710 }
8711 
8712 /* See if X can be simplified knowing that we will only refer to it in
8713    MODE and will only refer to those bits that are nonzero in MASK.
8714    If other bits are being computed or if masking operations are done
8715    that select a superset of the bits in MASK, they can sometimes be
8716    ignored.
8717 
8718    Return a possibly simplified expression, but always convert X to
8719    MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.
8720 
8721    If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8722    are all off in X.  This is used when X will be complemented, by either
8723    NOT, NEG, or XOR.  */
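/* For example (an illustrative sketch): forcing (and:SI Y (const_int 255))
   with MASK == 0xff can return just Y converted to MODE, since the AND is
   redundant once only those low eight bits are of interest.  */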
8724 
8725 static rtx
8726 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8727 	       int just_select)
8728 {
8729   enum rtx_code code = GET_CODE (x);
8730   int next_select = just_select || code == XOR || code == NOT || code == NEG;
8731   machine_mode op_mode;
8732   unsigned HOST_WIDE_INT nonzero;
8733 
8734   /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
8735      code below will do the wrong thing since the mode of such an
8736      expression is VOIDmode.
8737 
8738      Also do nothing if X is a CLOBBER; this can happen if X was
8739      the return value from a call to gen_lowpart.  */
8740   if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8741     return x;
8742 
8743   /* We want to perform the operation in its present mode unless we know
8744      that the operation is valid in MODE, in which case we do the operation
8745      in MODE.  */
8746   op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8747 	      && have_insn_for (code, mode))
8748 	     ? mode : GET_MODE (x));
8749 
8750   /* It is not valid to do a right-shift in a narrower mode
8751      than the one it came in with.  */
8752   if ((code == LSHIFTRT || code == ASHIFTRT)
8753       && partial_subreg_p (mode, GET_MODE (x)))
8754     op_mode = GET_MODE (x);
8755 
8756   /* Truncate MASK to fit OP_MODE.  */
8757   if (op_mode)
8758     mask &= GET_MODE_MASK (op_mode);
8759 
8760   /* Determine what bits of X are guaranteed to be (non)zero.  */
8761   nonzero = nonzero_bits (x, mode);
8762 
8763   /* If none of the bits in X are needed, return a zero.  */
8764   if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8765     x = const0_rtx;
8766 
8767   /* If X is a CONST_INT, return a new one.  Do this here since the
8768      test below will fail.  */
8769   if (CONST_INT_P (x))
8770     {
8771       if (SCALAR_INT_MODE_P (mode))
8772 	return gen_int_mode (INTVAL (x) & mask, mode);
8773       else
8774 	{
8775 	  x = GEN_INT (INTVAL (x) & mask);
8776 	  return gen_lowpart_common (mode, x);
8777 	}
8778     }
8779 
8780   /* If X is narrower than MODE and we want all the bits in X's mode, just
8781      get X in the proper mode.  */
8782   if (paradoxical_subreg_p (mode, GET_MODE (x))
8783       && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8784     return gen_lowpart (mode, x);
8785 
8786   /* We can ignore the effect of a SUBREG if it narrows the mode or
8787      if the constant masks to zero all the bits the mode doesn't have.  */
8788   if (GET_CODE (x) == SUBREG
8789       && subreg_lowpart_p (x)
8790       && (partial_subreg_p (x)
8791 	  || (mask
8792 	      & GET_MODE_MASK (GET_MODE (x))
8793 	      & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8794     return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8795 
8796   scalar_int_mode int_mode, xmode;
8797   if (is_a <scalar_int_mode> (mode, &int_mode)
8798       && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8799     /* OP_MODE is either MODE or XMODE, so it must be a scalar
8800        integer too.  */
8801     return force_int_to_mode (x, int_mode, xmode,
8802 			      as_a <scalar_int_mode> (op_mode),
8803 			      mask, just_select);
8804 
8805   return gen_lowpart_or_truncate (mode, x);
8806 }
8807 
8808 /* Subroutine of force_to_mode that handles cases in which both X and
8809    the result are scalar integers.  MODE is the mode of the result,
8810    XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8811    is preferred for simplified versions of X.  The other arguments
8812    are as for force_to_mode.  */
8813 
8814 static rtx
8815 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8816 		   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8817 		   int just_select)
8818 {
8819   enum rtx_code code = GET_CODE (x);
8820   int next_select = just_select || code == XOR || code == NOT || code == NEG;
8821   unsigned HOST_WIDE_INT fuller_mask;
8822   rtx op0, op1, temp;
8823   poly_int64 const_op0;
8824 
8825   /* When we have an arithmetic operation, or a shift whose count we
8826      do not know, we need to assume that all bits up to the highest-order
8827      bit in MASK will be needed.  This is how we form such a mask.  */
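  /* E.g. (a sketch): for MASK == 0x14 the highest set bit is bit 4, so
     FULLER_MASK becomes 0x1f; carries out of bit 4 cannot reach any bit we
     care about, but carries into bits 2 and 4 from below can.  */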
8828   if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8829     fuller_mask = HOST_WIDE_INT_M1U;
8830   else
8831     fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8832 		   - 1);
8833 
8834   switch (code)
8835     {
8836     case CLOBBER:
8837       /* If X is a (clobber (const_int)), return it since we know we are
8838 	 generating something that won't match.  */
8839       return x;
8840 
8841     case SIGN_EXTEND:
8842     case ZERO_EXTEND:
8843     case ZERO_EXTRACT:
8844     case SIGN_EXTRACT:
8845       x = expand_compound_operation (x);
8846       if (GET_CODE (x) != code)
8847 	return force_to_mode (x, mode, mask, next_select);
8848       break;
8849 
8850     case TRUNCATE:
8851       /* Similarly for a truncate.  */
8852       return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8853 
8854     case AND:
8855       /* If this is an AND with a constant, convert it into an AND
8856 	 whose constant is the AND of that constant with MASK.  If it
8857 	 remains an AND of MASK, delete it since it is redundant.  */
8858 
8859       if (CONST_INT_P (XEXP (x, 1)))
8860 	{
8861 	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8862 				      mask & INTVAL (XEXP (x, 1)));
8863 	  xmode = op_mode;
8864 
8865 	  /* If X is still an AND, see if it is an AND with a mask that
8866 	     is just some low-order bits.  If so, and it is MASK, we don't
8867 	     need it.  */
8868 
8869 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8870 	      && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8871 	    x = XEXP (x, 0);
8872 
8873 	  /* If it remains an AND, try making another AND with the bits
8874 	     in the mode mask that aren't in MASK turned on.  If the
8875 	     constant in the AND is wide enough, this might make a
8876 	     cheaper constant.  */
8877 
8878 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8879 	      && GET_MODE_MASK (xmode) != mask
8880 	      && HWI_COMPUTABLE_MODE_P (xmode))
8881 	    {
8882 	      unsigned HOST_WIDE_INT cval
8883 		= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8884 	      rtx y;
8885 
8886 	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8887 				       gen_int_mode (cval, xmode));
8888 	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8889 		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
8890 		x = y;
8891 	    }
8892 
8893 	  break;
8894 	}
8895 
8896       goto binop;
8897 
8898     case PLUS:
8899       /* In (and (plus FOO C1) M), if M is a mask that just turns off
8900 	 low-order bits (as in an alignment operation) and FOO is already
8901 	 aligned to that boundary, mask C1 to that boundary as well.
8902 	 This may eliminate that PLUS and, later, the AND.  */
8903 
8904       {
8905 	unsigned int width = GET_MODE_PRECISION (mode);
8906 	unsigned HOST_WIDE_INT smask = mask;
8907 
8908 	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8909 	   number, sign extend it.  */
8910 
8911 	if (width < HOST_BITS_PER_WIDE_INT
8912 	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8913 	  smask |= HOST_WIDE_INT_M1U << width;
8914 
8915 	if (CONST_INT_P (XEXP (x, 1))
8916 	    && pow2p_hwi (- smask)
8917 	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8918 	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8919 	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8920 					       (INTVAL (XEXP (x, 1)) & smask)),
8921 				mode, smask, next_select);
8922       }
8923 
8924       /* fall through */
8925 
8926     case MULT:
8927       /* Substituting into the operands of a widening MULT is not likely to
8928 	 create RTL matching a machine insn.  */
8929       if (code == MULT
8930 	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8931 	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8932 	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8933 	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8934 	  && REG_P (XEXP (XEXP (x, 0), 0))
8935 	  && REG_P (XEXP (XEXP (x, 1), 0)))
8936 	return gen_lowpart_or_truncate (mode, x);
8937 
8938       /* For PLUS, MINUS and MULT, we need any bits less significant than the
8939 	 most significant bit in MASK since carries from those bits will
8940 	 affect the bits we are interested in.  */
8941       mask = fuller_mask;
8942       goto binop;
8943 
8944     case MINUS:
8945       /* If X is (minus C Y) where C's least set bit is larger than any bit
8946 	 in the mask, then we may replace with (neg Y).  */
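      /* Illustration (constants are hypothetical): with MASK == 7,
	 (minus (const_int 16) Y) can become (neg Y), since 16 contributes
	 nothing below bit 4 and only bits 0-2 are of interest.  */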
8947       if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
8948 	  && known_alignment (poly_uint64 (const_op0)) > mask)
8949 	{
8950 	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8951 	  return force_to_mode (x, mode, mask, next_select);
8952 	}
8953 
8954       /* Similarly, if C contains every bit in the fuller_mask, then we may
8955 	 replace with (not Y).  */
8956       if (CONST_INT_P (XEXP (x, 0))
8957 	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8958 	{
8959 	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8960 	  return force_to_mode (x, mode, mask, next_select);
8961 	}
8962 
8963       mask = fuller_mask;
8964       goto binop;
8965 
8966     case IOR:
8967     case XOR:
8968       /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8969 	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8970 	 operation which may be a bitfield extraction.  Ensure that the
8971 	 constant we form is not wider than the mode of X.  */
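      /* As a sketch (constants invented): (ior (lshiftrt FOO (const_int 8))
	 (const_int 15)) can become (lshiftrt (ior FOO (const_int 0xf00))
	 (const_int 8)), provided 0xf00 still fits within the mode and the
	 tests below hold.  */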
8972 
8973       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8974 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8975 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8976 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8977 	  && CONST_INT_P (XEXP (x, 1))
8978 	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
8979 	       + floor_log2 (INTVAL (XEXP (x, 1))))
8980 	      < GET_MODE_PRECISION (xmode))
8981 	  && (UINTVAL (XEXP (x, 1))
8982 	      & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8983 	{
8984 	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8985 			       << INTVAL (XEXP (XEXP (x, 0), 1)),
8986 			       xmode);
8987 	  temp = simplify_gen_binary (GET_CODE (x), xmode,
8988 				      XEXP (XEXP (x, 0), 0), temp);
8989 	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8990 				   XEXP (XEXP (x, 0), 1));
8991 	  return force_to_mode (x, mode, mask, next_select);
8992 	}
8993 
8994     binop:
8995       /* For most binary operations, just propagate into the operation and
8996 	 change the mode if we have an operation of that mode.  */
8997 
8998       op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8999       op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
9000 
9001       /* If we ended up truncating both operands, truncate the result of the
9002 	 operation instead.  */
9003       if (GET_CODE (op0) == TRUNCATE
9004 	  && GET_CODE (op1) == TRUNCATE)
9005 	{
9006 	  op0 = XEXP (op0, 0);
9007 	  op1 = XEXP (op1, 0);
9008 	}
9009 
9010       op0 = gen_lowpart_or_truncate (op_mode, op0);
9011       op1 = gen_lowpart_or_truncate (op_mode, op1);
9012 
9013       if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
9014 	{
9015 	  x = simplify_gen_binary (code, op_mode, op0, op1);
9016 	  xmode = op_mode;
9017 	}
9018       break;
9019 
9020     case ASHIFT:
9021       /* For left shifts, do the same, but just for the first operand.
9022 	 However, we cannot do anything with shifts where we cannot
9023 	 guarantee that the counts are smaller than the size of the mode
9024 	 because such a count will have a different meaning in a
9025 	 wider mode.  */
9026 
9027       if (! (CONST_INT_P (XEXP (x, 1))
9028 	     && INTVAL (XEXP (x, 1)) >= 0
9029 	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
9030 	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
9031 		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
9032 		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
9033 	break;
9034 
9035       /* If the shift count is a constant and we can do arithmetic in
9036 	 the mode of the shift, refine which bits we need.  Otherwise, use the
9037 	 conservative form of the mask.  */
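      /* E.g. (illustrative values): with MASK == 0xf8 and a shift count of 3,
	 only bits 0x1f of the shifted operand can reach a bit we care about,
	 so MASK is narrowed to 0x1f here.  */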
9038       if (CONST_INT_P (XEXP (x, 1))
9039 	  && INTVAL (XEXP (x, 1)) >= 0
9040 	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
9041 	  && HWI_COMPUTABLE_MODE_P (op_mode))
9042 	mask >>= INTVAL (XEXP (x, 1));
9043       else
9044 	mask = fuller_mask;
9045 
9046       op0 = gen_lowpart_or_truncate (op_mode,
9047 				     force_to_mode (XEXP (x, 0), mode,
9048 						    mask, next_select));
9049 
9050       if (op_mode != xmode || op0 != XEXP (x, 0))
9051 	{
9052 	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
9053 	  xmode = op_mode;
9054 	}
9055       break;
9056 
9057     case LSHIFTRT:
9058       /* Here we can only do something if the shift count is a constant,
9059 	 the shift constant is valid for the host, and we can do arithmetic
9060 	 in OP_MODE.  */
9061 
9062       if (CONST_INT_P (XEXP (x, 1))
9063 	  && INTVAL (XEXP (x, 1)) >= 0
9064 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
9065 	  && HWI_COMPUTABLE_MODE_P (op_mode))
9066 	{
9067 	  rtx inner = XEXP (x, 0);
9068 	  unsigned HOST_WIDE_INT inner_mask;
9069 
9070 	  /* Select the mask of the bits we need for the shift operand.  */
9071 	  inner_mask = mask << INTVAL (XEXP (x, 1));
9072 
9073 	  /* We can only change the mode of the shift if we can do arithmetic
9074 	     in the mode of the shift and INNER_MASK is no wider than the
9075 	     width of X's mode.  */
9076 	  if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
9077 	    op_mode = xmode;
9078 
9079 	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);
9080 
9081 	  if (xmode != op_mode || inner != XEXP (x, 0))
9082 	    {
9083 	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
9084 	      xmode = op_mode;
9085 	    }
9086 	}
9087 
9088       /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
9089 	 shift and AND produces only copies of the sign bit (C2 is one less
9090 	 than a power of two), we can do this with just a shift.  */
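      /* For example (a hypothetical SImode sketch): if FOO has at least 29
	 sign-bit copies, (and (lshiftrt FOO 28) 7) selects three bits that
	 are all copies of the sign bit, and (lshiftrt FOO 29) produces those
	 same three copies directly.  */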
9091 
9092       if (GET_CODE (x) == LSHIFTRT
9093 	  && CONST_INT_P (XEXP (x, 1))
9094 	  /* The shift puts one of the sign bit copies in the least significant
9095 	     bit.  */
9096 	  && ((INTVAL (XEXP (x, 1))
9097 	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
9098 	      >= GET_MODE_PRECISION (xmode))
9099 	  && pow2p_hwi (mask + 1)
9100 	  /* Number of bits left after the shift must be at least as many as
9101 	     the mask needs.  */
9102 	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
9103 	      <= GET_MODE_PRECISION (xmode))
9104 	  /* Must be at least as many sign bit copies as the mask needs.  */
9105 	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
9106 	      >= exact_log2 (mask + 1)))
9107 	{
9108 	  int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
9109 	  x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
9110 				   gen_int_shift_amount (xmode, nbits));
9111 	}
9112       goto shiftrt;
9113 
9114     case ASHIFTRT:
9115       /* If we are just looking for the sign bit, we don't need this shift at
9116 	 all, even if it has a variable count.  */
9117       if (val_signbit_p (xmode, mask))
9118 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9119 
9120       /* If this is a shift by a constant, get a mask that contains those bits
9121 	 that are not copies of the sign bit.  We then have two cases:  If
9122 	 MASK only includes those bits, this can be a logical shift, which may
9123 	 allow simplifications.  If MASK is a single-bit field not within
9124 	 those bits, we are requesting a copy of the sign bit and hence can
9125 	 shift the sign bit to the appropriate location.  */
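      /* Sketch of the second case (values invented): in SImode, with
	 (ashiftrt FOO 24) and MASK == 0x10000, the selected bit is a copy of
	 the sign bit, so (lshiftrt FOO 15) moves the sign bit straight to
	 bit 16 instead.  */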
9126 
9127       if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9128 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9129 	{
9130 	  unsigned HOST_WIDE_INT nonzero;
9131 	  int i;
9132 
9133 	  /* If the considered data is wider than HOST_WIDE_INT, we can't
9134 	     represent a mask for all its bits in a single scalar.
9135 	     But we only care about the lower bits, so calculate these.  */
9136 
9137 	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9138 	    {
9139 	      nonzero = HOST_WIDE_INT_M1U;
9140 
9141 	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9142 		 is the number of bits a full-width mask would have set.
9143 		 We need only shift if these are fewer than nonzero can
9144 		 hold.  If not, we must keep all bits set in nonzero.  */
9145 
9146 	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9147 		  < HOST_BITS_PER_WIDE_INT)
9148 		nonzero >>= INTVAL (XEXP (x, 1))
9149 			    + HOST_BITS_PER_WIDE_INT
9150 			    - GET_MODE_PRECISION (xmode);
9151 	    }
9152 	  else
9153 	    {
9154 	      nonzero = GET_MODE_MASK (xmode);
9155 	      nonzero >>= INTVAL (XEXP (x, 1));
9156 	    }
9157 
9158 	  if ((mask & ~nonzero) == 0)
9159 	    {
9160 	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9161 					XEXP (x, 0), INTVAL (XEXP (x, 1)));
9162 	      if (GET_CODE (x) != ASHIFTRT)
9163 		return force_to_mode (x, mode, mask, next_select);
9164 	    }
9165 
9166 	  else if ((i = exact_log2 (mask)) >= 0)
9167 	    {
9168 	      x = simplify_shift_const
9169 		  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9170 		   GET_MODE_PRECISION (xmode) - 1 - i);
9171 
9172 	      if (GET_CODE (x) != ASHIFTRT)
9173 		return force_to_mode (x, mode, mask, next_select);
9174 	    }
9175 	}
9176 
9177       /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
9178 	 even if the shift count isn't a constant.  */
9179       if (mask == 1)
9180 	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9181 
9182     shiftrt:
9183 
9184       /* If this is a zero- or sign-extension operation that just affects bits
9185 	 we don't care about, remove it.  Be sure the call above returned
9186 	 something that is still a shift.  */
9187 
9188       if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9189 	  && CONST_INT_P (XEXP (x, 1))
9190 	  && INTVAL (XEXP (x, 1)) >= 0
9191 	  && (INTVAL (XEXP (x, 1))
9192 	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9193 	  && GET_CODE (XEXP (x, 0)) == ASHIFT
9194 	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9195 	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9196 			      next_select);
9197 
9198       break;
9199 
9200     case ROTATE:
9201     case ROTATERT:
9202       /* If the shift count is constant and we can do computations
9203 	 in the mode of X, compute where the bits we care about are.
9204 	 Otherwise, we can't do anything.  Don't change the mode of
9205 	 the shift or propagate MODE into the shift, though.  */
9206       if (CONST_INT_P (XEXP (x, 1))
9207 	  && INTVAL (XEXP (x, 1)) >= 0)
9208 	{
9209 	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9210 					    xmode, gen_int_mode (mask, xmode),
9211 					    XEXP (x, 1));
9212 	  if (temp && CONST_INT_P (temp))
9213 	    x = simplify_gen_binary (code, xmode,
9214 				     force_to_mode (XEXP (x, 0), xmode,
9215 						    INTVAL (temp), next_select),
9216 				     XEXP (x, 1));
9217 	}
9218       break;
9219 
9220     case NEG:
9221       /* If we just want the low-order bit, the NEG isn't needed since it
9222 	 won't change the low-order bit.  */
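      /* (Check: bit 0 of (neg X) always equals bit 0 of X, since X and -X
	 are congruent modulo 2.)  */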
9223       if (mask == 1)
9224 	return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9225 
9226       /* We need any bits less significant than the most significant bit in
9227 	 MASK since carries from those bits will affect the bits we are
9228 	 interested in.  */
9229       mask = fuller_mask;
9230       goto unop;
9231 
9232     case NOT:
9233       /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9234 	 same as the XOR case above.  Ensure that the constant we form is not
9235 	 wider than the mode of X.  */
9236 
9237       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9238 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9239 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9240 	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9241 	      < GET_MODE_PRECISION (xmode))
9242 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9243 	{
9244 	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9245 	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9246 	  x = simplify_gen_binary (LSHIFTRT, xmode,
9247 				   temp, XEXP (XEXP (x, 0), 1));
9248 
9249 	  return force_to_mode (x, mode, mask, next_select);
9250 	}
9251 
9252       /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9253 	 use the full mask inside the NOT.  */
9254       mask = fuller_mask;
9255 
9256     unop:
9257       op0 = gen_lowpart_or_truncate (op_mode,
9258 				     force_to_mode (XEXP (x, 0), mode, mask,
9259 						    next_select));
9260       if (op_mode != xmode || op0 != XEXP (x, 0))
9261 	{
9262 	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
9263 	  xmode = op_mode;
9264 	}
9265       break;
9266 
9267     case NE:
9268       /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9269 	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9270 	 which is equal to STORE_FLAG_VALUE.  */
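      /* Sketch, assuming STORE_FLAG_VALUE == 1: if FOO can only be 0 or 1,
	 (ne FOO 0) evaluates to exactly FOO, so the comparison can be dropped
	 before masking.  */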
9271       if ((mask & ~STORE_FLAG_VALUE) == 0
9272 	  && XEXP (x, 1) == const0_rtx
9273 	  && GET_MODE (XEXP (x, 0)) == mode
9274 	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9275 	  && (nonzero_bits (XEXP (x, 0), mode)
9276 	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9277 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9278 
9279       break;
9280 
9281     case IF_THEN_ELSE:
9282       /* We have no way of knowing if the IF_THEN_ELSE can itself be
9283 	 written in a narrower mode.  We play it safe and do not do so.  */
9284 
9285       op0 = gen_lowpart_or_truncate (xmode,
9286 				     force_to_mode (XEXP (x, 1), mode,
9287 						    mask, next_select));
9288       op1 = gen_lowpart_or_truncate (xmode,
9289 				     force_to_mode (XEXP (x, 2), mode,
9290 						    mask, next_select));
9291       if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9292 	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9293 				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9294 				  op0, op1);
9295       break;
9296 
9297     default:
9298       break;
9299     }
9300 
9301   /* Ensure we return a value of the proper mode.  */
9302   return gen_lowpart_or_truncate (mode, x);
9303 }
9304 
9305 /* Return nonzero if X is an expression that has one of two values depending on
9306    whether some other value is zero or nonzero.  In that case, we return the
9307    value that is being tested, *PTRUE is set to the value if the rtx being
9308    returned has a nonzero value, and *PFALSE is set to the other alternative.
9309 
9310    If we return zero, we set *PTRUE and *PFALSE to X.  */
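/* Illustrative sketch (assuming STORE_FLAG_VALUE == 1): for
   X == (plus (mult (ne R 0) (const_int 4)) (const_int 3)) this returns R,
   with *PTRUE set to (const_int 7) and *PFALSE to (const_int 3).  */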
9311 
9312 static rtx
9313 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9314 {
9315   machine_mode mode = GET_MODE (x);
9316   enum rtx_code code = GET_CODE (x);
9317   rtx cond0, cond1, true0, true1, false0, false1;
9318   unsigned HOST_WIDE_INT nz;
9319   scalar_int_mode int_mode;
9320 
9321   /* If we are comparing a value against zero, we are done.  */
9322   if ((code == NE || code == EQ)
9323       && XEXP (x, 1) == const0_rtx)
9324     {
9325       *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9326       *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9327       return XEXP (x, 0);
9328     }
9329 
9330   /* If this is a unary operation whose operand has one of two values, apply
9331      our opcode to compute those values.  */
9332   else if (UNARY_P (x)
9333 	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9334     {
9335       *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9336       *pfalse = simplify_gen_unary (code, mode, false0,
9337 				    GET_MODE (XEXP (x, 0)));
9338       return cond0;
9339     }
9340 
9341   /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9342      make can't possibly match and would suppress other optimizations.  */
9343   else if (code == COMPARE)
9344     ;
9345 
9346   /* If this is a binary operation, see if either side has only one of two
9347      values.  If either one does or if both do and they are conditional on
9348      the same value, compute the new true and false values.  */
9349   else if (BINARY_P (x))
9350     {
9351       rtx op0 = XEXP (x, 0);
9352       rtx op1 = XEXP (x, 1);
9353       cond0 = if_then_else_cond (op0, &true0, &false0);
9354       cond1 = if_then_else_cond (op1, &true1, &false1);
9355 
9356       if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9357 	  && (REG_P (op0) || REG_P (op1)))
9358 	{
9359 	  /* Try to enable a simplification by undoing work done by
9360 	     if_then_else_cond if it converted a REG into something more
9361 	     complex.  */
9362 	  if (REG_P (op0))
9363 	    {
9364 	      cond0 = 0;
9365 	      true0 = false0 = op0;
9366 	    }
9367 	  else
9368 	    {
9369 	      cond1 = 0;
9370 	      true1 = false1 = op1;
9371 	    }
9372 	}
9373 
9374       if ((cond0 != 0 || cond1 != 0)
9375 	  && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9376 	{
9377 	  /* If if_then_else_cond returned zero, then true/false are the
9378 	     same rtl.  We must copy one of them to prevent invalid rtl
9379 	     sharing.  */
9380 	  if (cond0 == 0)
9381 	    true0 = copy_rtx (true0);
9382 	  else if (cond1 == 0)
9383 	    true1 = copy_rtx (true1);
9384 
9385 	  if (COMPARISON_P (x))
9386 	    {
9387 	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9388 						true0, true1);
9389 	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9390 						 false0, false1);
9391 	     }
9392 	  else
9393 	    {
9394 	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
9395 	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
9396 	    }
9397 
9398 	  return cond0 ? cond0 : cond1;
9399 	}
9400 
9401       /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9402 	 operands is zero when the other is nonzero, and vice-versa,
9403 	 and STORE_FLAG_VALUE is 1 or -1.  */
9404 
9405       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9406 	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
9407 	      || code == UMAX)
9408 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9409 	{
9410 	  rtx op0 = XEXP (XEXP (x, 0), 1);
9411 	  rtx op1 = XEXP (XEXP (x, 1), 1);
9412 
9413 	  cond0 = XEXP (XEXP (x, 0), 0);
9414 	  cond1 = XEXP (XEXP (x, 1), 0);
9415 
9416 	  if (COMPARISON_P (cond0)
9417 	      && COMPARISON_P (cond1)
9418 	      && SCALAR_INT_MODE_P (mode)
9419 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9420 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9421 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9422 		  || ((swap_condition (GET_CODE (cond0))
9423 		       == reversed_comparison_code (cond1, NULL))
9424 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9425 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9426 	      && ! side_effects_p (x))
9427 	    {
9428 	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9429 	      *pfalse = simplify_gen_binary (MULT, mode,
9430 					     (code == MINUS
9431 					      ? simplify_gen_unary (NEG, mode,
9432 								    op1, mode)
9433 					      : op1),
9434 					      const_true_rtx);
9435 	      return cond0;
9436 	    }
9437 	}
9438 
9439       /* Similarly for MULT, AND and UMIN, except that for these the result
9440 	 is always zero.  */
9441       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9442 	  && (code == MULT || code == AND || code == UMIN)
9443 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9444 	{
9445 	  cond0 = XEXP (XEXP (x, 0), 0);
9446 	  cond1 = XEXP (XEXP (x, 1), 0);
9447 
9448 	  if (COMPARISON_P (cond0)
9449 	      && COMPARISON_P (cond1)
9450 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9451 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9452 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9453 		  || ((swap_condition (GET_CODE (cond0))
9454 		       == reversed_comparison_code (cond1, NULL))
9455 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9456 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9457 	      && ! side_effects_p (x))
9458 	    {
9459 	      *ptrue = *pfalse = const0_rtx;
9460 	      return cond0;
9461 	    }
9462 	}
9463     }
9464 
9465   else if (code == IF_THEN_ELSE)
9466     {
9467       /* If we have IF_THEN_ELSE already, extract the condition and
9468 	 canonicalize it if it is NE or EQ.  */
9469       cond0 = XEXP (x, 0);
9470       *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9471       if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9472 	return XEXP (cond0, 0);
9473       else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9474 	{
9475 	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9476 	  return XEXP (cond0, 0);
9477 	}
9478       else
9479 	return cond0;
9480     }
9481 
9482   /* If X is a SUBREG, we can narrow both the true and false values
9483      of the inner expression, if there is a condition.  */
9484   else if (code == SUBREG
9485 	   && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9486 					  &false0)) != 0)
9487     {
9488       true0 = simplify_gen_subreg (mode, true0,
9489 				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9490       false0 = simplify_gen_subreg (mode, false0,
9491 				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9492       if (true0 && false0)
9493 	{
9494 	  *ptrue = true0;
9495 	  *pfalse = false0;
9496 	  return cond0;
9497 	}
9498     }
9499 
9500   /* If X is a constant, this isn't special and will cause confusions
9501      if we treat it as such.  Likewise if it is equivalent to a constant.  */
9502   else if (CONSTANT_P (x)
9503 	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9504     ;
9505 
9506   /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9507      will be least confusing to the rest of the compiler.  */
9508   else if (mode == BImode)
9509     {
9510       *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9511       return x;
9512     }
9513 
9514   /* If X is known to be either 0 or -1, those are the true and
9515      false values when testing X.  */
9516   else if (x == constm1_rtx || x == const0_rtx
9517 	   || (is_a <scalar_int_mode> (mode, &int_mode)
9518 	       && (num_sign_bit_copies (x, int_mode)
9519 		   == GET_MODE_PRECISION (int_mode))))
9520     {
9521       *ptrue = constm1_rtx, *pfalse = const0_rtx;
9522       return x;
9523     }
9524 
9525   /* Likewise for 0 or a single bit.  */
9526   else if (HWI_COMPUTABLE_MODE_P (mode)
9527 	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
9528     {
9529       *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9530       return x;
9531     }
9532 
9533   /* Otherwise fail; show no condition with true and false values the same.  */
9534   *ptrue = *pfalse = x;
9535   return 0;
9536 }
9537 
9538 /* Return the value of expression X given the fact that condition COND
9539    is known to be true when applied to REG as its first operand and VAL
9540    as its second.  X is known to not be shared and so can be modified in
9541    place.
9542 
9543    We only handle the simplest cases, and specifically those cases that
9544    arise with IF_THEN_ELSE expressions.  */
9545 
9546 static rtx
9547 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9548 {
9549   enum rtx_code code = GET_CODE (x);
9550   const char *fmt;
9551   int i, j;
9552 
9553   if (side_effects_p (x))
9554     return x;
9555 
9556   /* If either operand of the condition is a floating point value,
9557      then we have to avoid collapsing an EQ comparison.  */
9558   if (cond == EQ
9559       && rtx_equal_p (x, reg)
9560       && ! FLOAT_MODE_P (GET_MODE (x))
9561       && ! FLOAT_MODE_P (GET_MODE (val)))
9562     return val;
9563 
9564   if (cond == UNEQ && rtx_equal_p (x, reg))
9565     return val;
9566 
9567   /* If X is (abs REG) and we know something about REG's relationship
9568      with zero, we may be able to simplify this.  */
9569 
9570   if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9571     switch (cond)
9572       {
9573       case GE:  case GT:  case EQ:
9574 	return XEXP (x, 0);
9575       case LT:  case LE:
9576 	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9577 				   XEXP (x, 0),
9578 				   GET_MODE (XEXP (x, 0)));
9579       default:
9580 	break;
9581       }
9582 
9583   /* The only other cases we handle are MIN, MAX, and comparisons if the
9584      operands are the same as REG and VAL.  */
9585 
9586   else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9587     {
9588       if (rtx_equal_p (XEXP (x, 0), val))
9589         {
9590 	  std::swap (val, reg);
9591 	  cond = swap_condition (cond);
9592         }
9593 
9594       if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9595 	{
9596 	  if (COMPARISON_P (x))
9597 	    {
9598 	      if (comparison_dominates_p (cond, code))
9599 		return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9600 
9601 	      code = reversed_comparison_code (x, NULL);
9602 	      if (code != UNKNOWN
9603 		  && comparison_dominates_p (cond, code))
9604 		return CONST0_RTX (GET_MODE (x));
9605 	      else
9606 		return x;
9607 	    }
9608 	  else if (code == SMAX || code == SMIN
9609 		   || code == UMIN || code == UMAX)
9610 	    {
9611 	      int unsignedp = (code == UMIN || code == UMAX);
9612 
9613 	      /* Do not reverse the condition when it is NE or EQ.
9614 		 This is because we cannot conclude anything about
9615 		 the value of 'SMAX (x, y)' when x is not equal to y,
9616 		 but we can when x equals y.  */
9617 	      if ((code == SMAX || code == UMAX)
9618 		  && ! (cond == EQ || cond == NE))
9619 		cond = reverse_condition (cond);
9620 
9621 	      switch (cond)
9622 		{
9623 		case GE:   case GT:
9624 		  return unsignedp ? x : XEXP (x, 1);
9625 		case LE:   case LT:
9626 		  return unsignedp ? x : XEXP (x, 0);
9627 		case GEU:  case GTU:
9628 		  return unsignedp ? XEXP (x, 1) : x;
9629 		case LEU:  case LTU:
9630 		  return unsignedp ? XEXP (x, 0) : x;
9631 		default:
9632 		  break;
9633 		}
9634 	    }
9635 	}
9636     }
9637   else if (code == SUBREG)
9638     {
9639       machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9640       rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9641 
9642       if (SUBREG_REG (x) != r)
9643 	{
9644 	  /* We must simplify subreg here, before we lose track of the
9645 	     original inner_mode.  */
9646 	  new_rtx = simplify_subreg (GET_MODE (x), r,
9647 				     inner_mode, SUBREG_BYTE (x));
9648 	  if (new_rtx)
9649 	    return new_rtx;
9650 	  else
9651 	    SUBST (SUBREG_REG (x), r);
9652 	}
9653 
9654       return x;
9655     }
9656   /* We don't have to handle SIGN_EXTEND here, because even in the
9657      case of replacing something with a modeless CONST_INT, a
9658      CONST_INT is already (supposed to be) a valid sign extension for
9659      its narrower mode, which implies it's already properly
9660      sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
9661      story is different.  */
9662   else if (code == ZERO_EXTEND)
9663     {
9664       machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9665       rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9666 
9667       if (XEXP (x, 0) != r)
9668 	{
9669 	  /* We must simplify the zero_extend here, before we lose
9670 	     track of the original inner_mode.  */
9671 	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9672 					      r, inner_mode);
9673 	  if (new_rtx)
9674 	    return new_rtx;
9675 	  else
9676 	    SUBST (XEXP (x, 0), r);
9677 	}
9678 
9679       return x;
9680     }
9681 
9682   fmt = GET_RTX_FORMAT (code);
9683   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9684     {
9685       if (fmt[i] == 'e')
9686 	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9687       else if (fmt[i] == 'E')
9688 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9689 	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9690 						cond, reg, val));
9691     }
9692 
9693   return x;
9694 }
9695 
9696 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9697    assignment as a field assignment.  */
9698 
9699 static int
9700 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9701 {
9702   if (widen_x && GET_MODE (x) != GET_MODE (y))
9703     {
9704       if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9705 	return 0;
9706       if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9707 	return 0;
9708       x = adjust_address_nv (x, GET_MODE (y),
9709 			     byte_lowpart_offset (GET_MODE (y),
9710 						  GET_MODE (x)));
9711     }
9712 
9713   if (x == y || rtx_equal_p (x, y))
9714     return 1;
9715 
9716   if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9717     return 0;
9718 
9719   /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9720      Note that all SUBREGs of MEM are paradoxical; otherwise they
9721      would have been rewritten.  */
9722   if (MEM_P (x) && GET_CODE (y) == SUBREG
9723       && MEM_P (SUBREG_REG (y))
9724       && rtx_equal_p (SUBREG_REG (y),
9725 		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9726     return 1;
9727 
9728   if (MEM_P (y) && GET_CODE (x) == SUBREG
9729       && MEM_P (SUBREG_REG (x))
9730       && rtx_equal_p (SUBREG_REG (x),
9731 		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9732     return 1;
9733 
9734   /* We used to see if get_last_value of X and Y were the same but that's
9735      not correct.  In one direction, we'll cause the assignment to have
9736      the wrong destination and in the other, we'll import a register into
9737      this insn that might already have been dead.  So fail if none of the
9738      above cases are true.  */
9739   return 0;
9740 }
9741 
9742 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9743    Return that assignment if so.
9744 
9745    We only handle the most common cases.  */
9746 
9747 static rtx
9748 make_field_assignment (rtx x)
9749 {
9750   rtx dest = SET_DEST (x);
9751   rtx src = SET_SRC (x);
9752   rtx assign;
9753   rtx rhs, lhs;
9754   HOST_WIDE_INT c1;
9755   HOST_WIDE_INT pos;
9756   unsigned HOST_WIDE_INT len;
9757   rtx other;
9758 
9759   /* All the rules in this function are specific to scalar integers.  */
9760   scalar_int_mode mode;
9761   if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9762     return x;
9763 
9764   /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9765      a clear of a one-bit field.  We will have changed it to
9766      (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
9767      for a SUBREG.  */
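  /* (E.g., with the invented value POS == 3, (rotate (const_int -2) 3) is a
     mask with only bit 3 clear -- the original (not (ashift 1 3)) -- so the
     SET built below clears just that bit of DEST.)  */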
9768 
9769   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9770       && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9771       && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9772       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9773     {
9774       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9775 				1, 1, 1, 0);
9776       if (assign != 0)
9777 	return gen_rtx_SET (assign, const0_rtx);
9778       return x;
9779     }
9780 
9781   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9782       && subreg_lowpart_p (XEXP (src, 0))
9783       && partial_subreg_p (XEXP (src, 0))
9784       && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9785       && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9786       && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9787       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9788     {
9789       assign = make_extraction (VOIDmode, dest, 0,
9790 				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9791 				1, 1, 1, 0);
9792       if (assign != 0)
9793 	return gen_rtx_SET (assign, const0_rtx);
9794       return x;
9795     }
9796 
9797   /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9798      one-bit field.  */
9799   if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9800       && XEXP (XEXP (src, 0), 0) == const1_rtx
9801       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9802     {
9803       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9804 				1, 1, 1, 0);
9805       if (assign != 0)
9806 	return gen_rtx_SET (assign, const1_rtx);
9807       return x;
9808     }
9809 
9810   /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9811      SRC is an AND with all bits of that field set, then we can discard
9812      the AND.  */
9813   if (GET_CODE (dest) == ZERO_EXTRACT
9814       && CONST_INT_P (XEXP (dest, 1))
9815       && GET_CODE (src) == AND
9816       && CONST_INT_P (XEXP (src, 1)))
9817     {
9818       HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9819       unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9820       unsigned HOST_WIDE_INT ze_mask;
9821 
9822       if (width >= HOST_BITS_PER_WIDE_INT)
9823 	ze_mask = -1;
9824       else
9825 	ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9826 
9827       /* Complete overlap.  We can remove the source AND.  */
9828       if ((and_mask & ze_mask) == ze_mask)
9829 	return gen_rtx_SET (dest, XEXP (src, 0));
9830 
9831       /* Partial overlap.  We can reduce the source AND.  */
9832       if ((and_mask & ze_mask) != and_mask)
9833 	{
9834 	  src = gen_rtx_AND (mode, XEXP (src, 0),
9835 			     gen_int_mode (and_mask & ze_mask, mode));
9836 	  return gen_rtx_SET (dest, src);
9837 	}
9838     }
9839 
9840   /* The other case we handle is assignments into a constant-position
9841      field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
9842      a mask that has all one bits except for a group of zero bits and
9843      OTHER is known to have zeros where C1 has ones, this is such an
9844      assignment.  Compute the position and length from C1.  Shift OTHER
9845      to the appropriate position, force it to the required mode, and
9846      make the extraction.  Check for the AND in both operands.  */
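  /* A small sketch with invented numbers: in SImode, C1 == 0xffff00ff leaves
     zeros only in bits 8-15, so POS == 8 and LEN == 8 below; if OTHER is known
     to be zero outside those bits, the store would typically become a
     ZERO_EXTRACT of width 8 at position 8 sourced from OTHER shifted right
     by 8.  */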
9847 
9848   /* One or more SUBREGs might obscure the constant-position field
9849      assignment.  The first one we are likely to encounter is an outer
9850      narrowing SUBREG, which we can just strip for the purposes of
9851      identifying the constant-field assignment.  */
9852   scalar_int_mode src_mode = mode;
9853   if (GET_CODE (src) == SUBREG
9854       && subreg_lowpart_p (src)
9855       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9856     src = SUBREG_REG (src);
9857 
9858   if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9859     return x;
9860 
9861   rhs = expand_compound_operation (XEXP (src, 0));
9862   lhs = expand_compound_operation (XEXP (src, 1));
9863 
9864   if (GET_CODE (rhs) == AND
9865       && CONST_INT_P (XEXP (rhs, 1))
9866       && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9867     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9868   /* The second SUBREG that might get in the way is a paradoxical
9869      SUBREG around the first operand of the AND.  We want to
9870      pretend the operand is as wide as the destination here.   We
9871      do this by adjusting the MEM to wider mode for the sole
9872      purpose of the call to rtx_equal_for_field_assignment_p.   Also
9873      note this trick only works for MEMs.  */
9874   else if (GET_CODE (rhs) == AND
9875 	   && paradoxical_subreg_p (XEXP (rhs, 0))
9876 	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9877 	   && CONST_INT_P (XEXP (rhs, 1))
9878 	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9879 						dest, true))
9880     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9881   else if (GET_CODE (lhs) == AND
9882 	   && CONST_INT_P (XEXP (lhs, 1))
9883 	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9884     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9885   /* The second SUBREG that might get in the way is a paradoxical
9886      SUBREG around the first operand of the AND.  We want to
9887      pretend the operand is as wide as the destination here.   We
9888      do this by adjusting the MEM to wider mode for the sole
9889      purpose of the call to rtx_equal_for_field_assignment_p.   Also
9890      note this trick only works for MEMs.  */
9891   else if (GET_CODE (lhs) == AND
9892 	   && paradoxical_subreg_p (XEXP (lhs, 0))
9893 	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9894 	   && CONST_INT_P (XEXP (lhs, 1))
9895 	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9896 						dest, true))
9897     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9898   else
9899     return x;
9900 
9901   pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9902   if (pos < 0
9903       || pos + len > GET_MODE_PRECISION (mode)
9904       || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9905       || (c1 & nonzero_bits (other, mode)) != 0)
9906     return x;
9907 
9908   assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9909   if (assign == 0)
9910     return x;
9911 
9912   /* The mode to use for the source is the mode of the assignment, or of
9913      what is inside a possible STRICT_LOW_PART.  */
9914   machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9915 			   ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9916 
9917   /* Shift OTHER right POS places and make it the source, restricting it
9918      to the proper length and mode.  */
9919 
9920   src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9921 						     src_mode, other, pos),
9922 			       dest);
9923   src = force_to_mode (src, new_mode,
9924 		       len >= HOST_BITS_PER_WIDE_INT
9925 		       ? HOST_WIDE_INT_M1U
9926 		       : (HOST_WIDE_INT_1U << len) - 1,
9927 		       0);
9928 
9929   /* If SRC is masked by an AND that does not make a difference in
9930      the value being stored, strip it.  */
9931   if (GET_CODE (assign) == ZERO_EXTRACT
9932       && CONST_INT_P (XEXP (assign, 1))
9933       && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9934       && GET_CODE (src) == AND
9935       && CONST_INT_P (XEXP (src, 1))
9936       && UINTVAL (XEXP (src, 1))
9937 	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9938     src = XEXP (src, 0);
9939 
9940   return gen_rtx_SET (assign, src);
9941 }
9942 
9943 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9944    if so.  */
9945 
9946 static rtx
9947 apply_distributive_law (rtx x)
9948 {
9949   enum rtx_code code = GET_CODE (x);
9950   enum rtx_code inner_code;
9951   rtx lhs, rhs, other;
9952   rtx tem;
9953 
9954   /* Distributivity is not true for floating point as it can change the
9955      value.  So we don't do it unless -funsafe-math-optimizations.  */
9956   if (FLOAT_MODE_P (GET_MODE (x))
9957       && ! flag_unsafe_math_optimizations)
9958     return x;
9959 
9960   /* The outer operation can only be one of the following:  */
9961   if (code != IOR && code != AND && code != XOR
9962       && code != PLUS && code != MINUS)
9963     return x;
9964 
9965   lhs = XEXP (x, 0);
9966   rhs = XEXP (x, 1);
9967 
9968   /* If either operand is a primitive we can't do anything, so get out
9969      fast.  */
9970   if (OBJECT_P (lhs) || OBJECT_P (rhs))
9971     return x;
9972 
9973   lhs = expand_compound_operation (lhs);
9974   rhs = expand_compound_operation (rhs);
9975   inner_code = GET_CODE (lhs);
9976   if (inner_code != GET_CODE (rhs))
9977     return x;
9978 
9979   /* See if the inner and outer operations distribute.  */
9980   switch (inner_code)
9981     {
9982     case LSHIFTRT:
9983     case ASHIFTRT:
9984     case AND:
9985     case IOR:
9986       /* These all distribute except over PLUS.  */
9987       if (code == PLUS || code == MINUS)
9988 	return x;
9989       break;
9990 
9991     case MULT:
9992       if (code != PLUS && code != MINUS)
9993 	return x;
9994       break;
9995 
9996     case ASHIFT:
9997       /* This is also a multiply, so it distributes over everything.  */
9998       break;
9999 
10000     /* This used to handle SUBREG, but this turned out to be counter-
10001        productive, since (subreg (op ...)) usually is not handled by
10002        insn patterns, and this "optimization" therefore transformed
10003        recognizable patterns into unrecognizable ones.  Therefore the
10004        SUBREG case was removed from here.
10005 
10006        It is possible that distributing SUBREG over arithmetic operations
10007        leads to an intermediate result that can then be optimized further,
10008        e.g. by moving the outer SUBREG to the other side of a SET as done
10009        in simplify_set.  This seems to have been the original intent of
10010        handling SUBREGs here.
10011 
10012        However, with current GCC this does not appear to actually happen,
10013        at least on major platforms.  If some case is found where removing
10014        the SUBREG case here prevents follow-on optimizations, distributing
10015        SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */
10016 
10017     default:
10018       return x;
10019     }
10020 
10021   /* Set LHS and RHS to the inner operands (A and B in the example
10022      above) and set OTHER to the common operand (C in the example).
10023      There is only one way to do this unless the inner operation is
10024      commutative.  */
10025   if (COMMUTATIVE_ARITH_P (lhs)
10026       && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
10027     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
10028   else if (COMMUTATIVE_ARITH_P (lhs)
10029 	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
10030     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
10031   else if (COMMUTATIVE_ARITH_P (lhs)
10032 	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
10033     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
10034   else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
10035     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
10036   else
10037     return x;
10038 
10039   /* Form the new inner operation, seeing if it simplifies first.  */
10040   tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
10041 
10042   /* There is one exception to the general way of distributing:
10043      (a | c) ^ (b | c) -> (a ^ b) & ~c  */
10044   if (code == XOR && inner_code == IOR)
10045     {
10046       inner_code = AND;
10047       other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
10048     }
10049 
10050   /* We may be able to continue distributing the result, so call
10051      ourselves recursively on the inner operation before forming the
10052      outer operation, which we return.  */
10053   return simplify_gen_binary (inner_code, GET_MODE (x),
10054 			      apply_distributive_law (tem), other);
10055 }
10056 
10057 /* See if X is of the form (* (+ A B) C), and if so convert to
10058    (+ (* A C) (* B C)) and try to simplify.
10059 
10060    Most of the time, this results in no change.  However, if some of
10061    the operands are the same or inverses of each other, simplifications
10062    will result.
10063 
10064    For example, (and (ior A B) (not B)) can occur as the result of
10065    expanding a bit field assignment.  When we apply the distributive
10066    law to this, we get (ior (and A (not B)) (and B (not B))),
10067    which then simplifies to (and A (not B)).
10068 
10069    Note that no checks happen on the validity of applying the inverse
10070    distributive law.  This is pointless since we can do it in the
10071    few places where this routine is called.
10072 
10073    N is the index of the term that is decomposed (the arithmetic operation,
10074    i.e. (+ A B) in the first example above).  !N is the index of the term that
10075    is distributed, i.e. of C in the first example above.  */
10076 static rtx
10077 distribute_and_simplify_rtx (rtx x, int n)
10078 {
10079   machine_mode mode;
10080   enum rtx_code outer_code, inner_code;
10081   rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
10082 
10083   /* Distributivity is not true for floating point as it can change the
10084      value.  So we don't do it unless -funsafe-math-optimizations.  */
10085   if (FLOAT_MODE_P (GET_MODE (x))
10086       && ! flag_unsafe_math_optimizations)
10087     return NULL_RTX;
10088 
10089   decomposed = XEXP (x, n);
10090   if (!ARITHMETIC_P (decomposed))
10091     return NULL_RTX;
10092 
10093   mode = GET_MODE (x);
10094   outer_code = GET_CODE (x);
10095   distributed = XEXP (x, !n);
10096 
10097   inner_code = GET_CODE (decomposed);
10098   inner_op0 = XEXP (decomposed, 0);
10099   inner_op1 = XEXP (decomposed, 1);
10100 
10101   /* Special case (and (xor B C) (not A)), which is equivalent to
10102      (xor (ior A B) (ior A C))  */
10103   if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
10104     {
10105       distributed = XEXP (distributed, 0);
10106       outer_code = IOR;
10107     }
10108 
10109   if (n == 0)
10110     {
10111       /* Distribute the second term.  */
10112       new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10113       new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10114     }
10115   else
10116     {
10117       /* Distribute the first term.  */
10118       new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10119       new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10120     }
10121 
10122   tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10123 						     new_op0, new_op1));
10124   if (GET_CODE (tmp) != outer_code
10125       && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10126 	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
10127     return tmp;
10128 
10129   return NULL_RTX;
10130 }
10131 
10132 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10133    in MODE.  Return an equivalent form, if different from (and VAROP
10134    (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
10135 
10136 static rtx
10137 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10138 			  unsigned HOST_WIDE_INT constop)
10139 {
10140   unsigned HOST_WIDE_INT nonzero;
10141   unsigned HOST_WIDE_INT orig_constop;
10142   rtx orig_varop;
10143   int i;
10144 
10145   orig_varop = varop;
10146   orig_constop = constop;
10147   if (GET_CODE (varop) == CLOBBER)
10148     return NULL_RTX;
10149 
10150   /* Simplify VAROP knowing that we will be only looking at some of the
10151      bits in it.
10152 
10153      Note by passing in CONSTOP, we guarantee that the bits not set in
10154      CONSTOP are not significant and will never be examined.  We must
10155      ensure that is the case by explicitly masking out those bits
10156      before returning.  */
10157   varop = force_to_mode (varop, mode, constop, 0);
10158 
10159   /* If VAROP is a CLOBBER, we will fail so return it.  */
10160   if (GET_CODE (varop) == CLOBBER)
10161     return varop;
10162 
10163   /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10164      to VAROP and return the new constant.  */
10165   if (CONST_INT_P (varop))
10166     return gen_int_mode (INTVAL (varop) & constop, mode);
10167 
10168   /* See what bits may be nonzero in VAROP.  Unlike the general case of
10169      a call to nonzero_bits, here we don't care about bits outside
10170      MODE.  */
10171 
10172   nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10173 
10174   /* Turn off all bits in the constant that are known to already be zero.
10175      Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10176      which is tested below.  */
10177 
10178   constop &= nonzero;
10179 
10180   /* If we don't have any bits left, return zero.  */
10181   if (constop == 0)
10182     return const0_rtx;
10183 
10184   /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10185      a power of two, we can replace this with an ASHIFT.  */
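  /* (Example with an invented constant: if X is known to be 0 or 1, then
     (and (neg X) 8) is 8 when X is 1 and 0 otherwise, i.e. (ashift X 3).)  */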
10186   if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10187       && (i = exact_log2 (constop)) >= 0)
10188     return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10189 
10190   /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10191      or XOR, then try to apply the distributive law.  This may eliminate
10192      operations if either branch can be simplified because of the AND.
10193      It may also make some cases more complex, but those cases probably
10194      won't match a pattern either with or without this.  */
10195 
10196   if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10197     {
10198       scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10199       return
10200 	gen_lowpart
10201 	  (mode,
10202 	   apply_distributive_law
10203 	   (simplify_gen_binary (GET_CODE (varop), varop_mode,
10204 				 simplify_and_const_int (NULL_RTX, varop_mode,
10205 							 XEXP (varop, 0),
10206 							 constop),
10207 				 simplify_and_const_int (NULL_RTX, varop_mode,
10208 							 XEXP (varop, 1),
10209 							 constop))));
10210     }
10211 
10212   /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10213      the AND and see if one of the operands simplifies to zero.  If so, we
10214      may eliminate it.  */
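  /* (Sketch with invented values: for CONSTOP == 7, (plus X (const_int 8))
     masked with 7 is just (and X 7), since adding a multiple of 8 cannot
     change the low three bits.)  */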
10215 
10216   if (GET_CODE (varop) == PLUS
10217       && pow2p_hwi (constop + 1))
10218     {
10219       rtx o0, o1;
10220 
10221       o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10222       o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10223       if (o0 == const0_rtx)
10224 	return o1;
10225       if (o1 == const0_rtx)
10226 	return o0;
10227     }
10228 
10229   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
10230   varop = gen_lowpart (mode, varop);
10231   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10232     return NULL_RTX;
10233 
10234   /* If we are only masking insignificant bits, return VAROP.  */
10235   if (constop == nonzero)
10236     return varop;
10237 
10238   if (varop == orig_varop && constop == orig_constop)
10239     return NULL_RTX;
10240 
10241   /* Otherwise, return an AND.  */
10242   return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10243 }
10244 
10245 
10246 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10247    in MODE.
10248 
10249    Return an equivalent form, if different from X.  Otherwise, return X.  If
10250    X is zero, we are to always construct the equivalent form.  */
10251 
10252 static rtx
10253 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10254 			unsigned HOST_WIDE_INT constop)
10255 {
10256   rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10257   if (tem)
10258     return tem;
10259 
10260   if (!x)
10261     x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10262 			     gen_int_mode (constop, mode));
10263   if (GET_MODE (x) != mode)
10264     x = gen_lowpart (mode, x);
10265   return x;
10266 }
10267 
10268 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10269    We don't care about bits outside of those defined in MODE.
10270    We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10271 
10272    For most X this is simply GET_MODE_MASK (MODE), but if X is
10273    a shift, AND, or zero_extract, we can do better.  */
10274 
10275 static rtx
10276 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10277 			      scalar_int_mode mode,
10278 			      unsigned HOST_WIDE_INT *nonzero)
10279 {
10280   rtx tem;
10281   reg_stat_type *rsp;
10282 
10283   /* If X is a register whose nonzero bits value is current, use it.
10284      Otherwise, if X is a register whose value we can find, use that
10285      value.  Otherwise, use the previously-computed global nonzero bits
10286      for this register.  */
10287 
10288   rsp = &reg_stat[REGNO (x)];
10289   if (rsp->last_set_value != 0
10290       && (rsp->last_set_mode == mode
10291 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10292 	      && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10293 	      && GET_MODE_CLASS (mode) == MODE_INT))
10294       && ((rsp->last_set_label >= label_tick_ebb_start
10295 	   && rsp->last_set_label < label_tick)
10296 	  || (rsp->last_set_label == label_tick
10297               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10298 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10299 	      && REGNO (x) < reg_n_sets_max
10300 	      && REG_N_SETS (REGNO (x)) == 1
10301 	      && !REGNO_REG_SET_P
10302 		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10303 		   REGNO (x)))))
10304     {
10305       /* Note that, even if the precision of last_set_mode is lower than that
10306 	 of mode, record_value_for_reg invoked nonzero_bits on the register
10307 	 with nonzero_bits_mode (because last_set_mode is necessarily integral
10308 	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10309 	 are all valid, hence in mode too since nonzero_bits_mode is defined
10310 	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
10311       *nonzero &= rsp->last_set_nonzero_bits;
10312       return NULL;
10313     }
10314 
10315   tem = get_last_value (x);
10316   if (tem)
10317     {
10318       if (SHORT_IMMEDIATES_SIGN_EXTEND)
10319 	tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10320 
10321       return tem;
10322     }
10323 
10324   if (nonzero_sign_valid && rsp->nonzero_bits)
10325     {
10326       unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10327 
10328       if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10329 	/* We don't know anything about the upper bits.  */
10330 	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10331 
10332       *nonzero &= mask;
10333     }
10334 
10335   return NULL;
10336 }
10337 
10338 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10339    end of X that are known to be equal to the sign bit.  X will be used
10340    in mode MODE; the returned value will always be between 1 and the
10341    number of bits in MODE.  */
10342 
10343 static rtx
10344 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10345 				     scalar_int_mode mode,
10346 				     unsigned int *result)
10347 {
10348   rtx tem;
10349   reg_stat_type *rsp;
10350 
10351   rsp = &reg_stat[REGNO (x)];
10352   if (rsp->last_set_value != 0
10353       && rsp->last_set_mode == mode
10354       && ((rsp->last_set_label >= label_tick_ebb_start
10355 	   && rsp->last_set_label < label_tick)
10356 	  || (rsp->last_set_label == label_tick
10357               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10358 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10359 	      && REGNO (x) < reg_n_sets_max
10360 	      && REG_N_SETS (REGNO (x)) == 1
10361 	      && !REGNO_REG_SET_P
10362 		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10363 		   REGNO (x)))))
10364     {
10365       *result = rsp->last_set_sign_bit_copies;
10366       return NULL;
10367     }
10368 
10369   tem = get_last_value (x);
10370   if (tem != 0)
10371     return tem;
10372 
10373   if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10374       && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10375     *result = rsp->sign_bit_copies;
10376 
10377   return NULL;
10378 }
10379 
10380 /* Return the number of "extended" bits there are in X, when interpreted
10381    as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
10382    unsigned quantities, this is the number of high-order zero bits.
10383    For signed quantities, this is the number of copies of the sign bit
10384    minus 1.  In both cases, this function returns the number of "spare"
10385    bits.  For example, if two quantities for which this function returns
10386    at least 1 are added, the addition is known not to overflow.
10387 
10388    This function will always return 0 unless called during combine, which
10389    implies that it must be called from a define_split.  */
10390 
10391 unsigned int
10392 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10393 {
10394   if (nonzero_sign_valid == 0)
10395     return 0;
10396 
10397   scalar_int_mode int_mode;
10398   return (unsignedp
10399 	  ? (is_a <scalar_int_mode> (mode, &int_mode)
10400 	     && HWI_COMPUTABLE_MODE_P (int_mode)
10401 	     ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10402 			       - floor_log2 (nonzero_bits (x, int_mode)))
10403 	     : 0)
10404 	  : num_sign_bit_copies (x, mode) - 1);
10405 }
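
/* For example, assuming 32-bit SImode: if X is known to have nonzero bits
   only within 0xff, the unsigned count is 31 - floor_log2 (0xff) = 24
   spare bits; if X is a sign-extended 8-bit value (25 sign bit copies),
   the signed count is 25 - 1 = 24.  Either way, adding two such values
   cannot overflow, as noted above.  */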
10406 
10407 /* This function is called from `simplify_shift_const' to merge two
10408    outer operations.  Specifically, we have already found that we need
10409    to perform operation *POP0 with constant *PCONST0 at the outermost
10410    position.  We would now like to also perform OP1 with constant CONST1
10411    (with *POP0 being done last).
10412 
10413    Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10414    the resulting operation.  *PCOMP_P is set to 1 if we would need to
10415    complement the innermost operand, otherwise it is unchanged.
10416 
10417    MODE is the mode in which the operation will be done.  No bits outside
10418    the width of this mode matter.  It is assumed that the width of this mode
10419    is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10420 
10421    If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
10422    PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
10423    proper result is simply *PCONST0.
10424 
10425    If the resulting operation cannot be expressed as one operation, we
10426    return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
10427 
10428 static int
10429 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
		 HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10430 {
10431   enum rtx_code op0 = *pop0;
10432   HOST_WIDE_INT const0 = *pconst0;
10433 
10434   const0 &= GET_MODE_MASK (mode);
10435   const1 &= GET_MODE_MASK (mode);
10436 
10437   /* If OP0 is an AND, clear unimportant bits in CONST1.  */
10438   if (op0 == AND)
10439     const1 &= const0;
10440 
10441   /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same or
10442      if OP0 is SET.  */
10443 
10444   if (op1 == UNKNOWN || op0 == SET)
10445     return 1;
10446 
10447   else if (op0 == UNKNOWN)
10448     op0 = op1, const0 = const1;
10449 
10450   else if (op0 == op1)
10451     {
10452       switch (op0)
10453 	{
10454 	case AND:
10455 	  const0 &= const1;
10456 	  break;
10457 	case IOR:
10458 	  const0 |= const1;
10459 	  break;
10460 	case XOR:
10461 	  const0 ^= const1;
10462 	  break;
10463 	case PLUS:
10464 	  const0 += const1;
10465 	  break;
10466 	case NEG:
10467 	  op0 = UNKNOWN;
10468 	  break;
10469 	default:
10470 	  break;
10471 	}
10472     }
10473 
10474   /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
10475   else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10476     return 0;
10477 
10478   /* If the two constants aren't the same, we can't do anything.  The
10479      remaining six cases can all be done.  */
10480   else if (const0 != const1)
10481     return 0;
10482 
10483   else
10484     switch (op0)
10485       {
10486       case IOR:
10487 	if (op1 == AND)
10488 	  /* (a & b) | b == b */
10489 	  op0 = SET;
10490 	else /* op1 == XOR */
10491 	  /* (a ^ b) | b == a | b */
10492 	  {;}
10493 	break;
10494 
10495       case XOR:
10496 	if (op1 == AND)
10497 	  /* (a & b) ^ b == (~a) & b */
10498 	  op0 = AND, *pcomp_p = 1;
10499 	else /* op1 == IOR */
10500 	  /* (a | b) ^ b == a & ~b */
10501 	  op0 = AND, const0 = ~const0;
10502 	break;
10503 
10504       case AND:
10505 	if (op1 == IOR)
10506 	  /* (a | b) & b == b */
10507 	  op0 = SET;
10508 	else /* op1 == XOR */
10509 	  /* (a ^ b) & b == (~a) & b */
10510 	  *pcomp_p = 1;
10511 	break;
10512       default:
10513 	break;
10514       }
10515 
10516   /* Check for NO-OP cases.  */
10517   const0 &= GET_MODE_MASK (mode);
10518   if (const0 == 0
10519       && (op0 == IOR || op0 == XOR || op0 == PLUS))
10520     op0 = UNKNOWN;
10521   else if (const0 == 0 && op0 == AND)
10522     op0 = SET;
10523   else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10524 	   && op0 == AND)
10525     op0 = UNKNOWN;
10526 
10527   *pop0 = op0;
10528 
10529   /* ??? Slightly redundant with the above mask, but not entirely.
10530      Moving this above means we'd have to sign-extend the mode mask
10531      for the final test.  */
10532   if (op0 != UNKNOWN && op0 != NEG)
10533     *pconst0 = trunc_int_for_mode (const0, mode);
10534 
10535   return 1;
10536 }
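
/* For example, merging an outermost XOR with 0x0f (*POP0/*PCONST0) and an
   inner IOR with 0x0f (OP1/CONST1) in QImode uses the (a | b) ^ b == a & ~b
   identity above: the routine returns 1 with *POP0 == AND and *PCONST0
   representing the QImode mask 0xf0.  */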
10537 
10538 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10539    the shift in.  The original shift operation CODE is performed on OP in
10540    ORIG_MODE.  Return the wider mode MODE if we can perform the operation
10541    in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
10542    result of the shift is subject to operation OUTER_CODE with operand
10543    OUTER_CONST.  */
10544 
10545 static scalar_int_mode
10546 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10547 		      scalar_int_mode orig_mode, scalar_int_mode mode,
10548 		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10549 {
10550   gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10551 
10552   /* In general right shifts and rotates can't be performed in a wider mode.  */
10553   switch (code)
10554     {
10555     case ASHIFTRT:
10556       /* We can still widen if the bits brought in from the left are identical
10557 	 to the sign bit of ORIG_MODE.  */
10558       if (num_sign_bit_copies (op, mode)
10559 	  > (unsigned) (GET_MODE_PRECISION (mode)
10560 			- GET_MODE_PRECISION (orig_mode)))
10561 	return mode;
10562       return orig_mode;
10563 
10564     case LSHIFTRT:
10565       /* Similarly here but with zero bits.  */
10566       if (HWI_COMPUTABLE_MODE_P (mode)
10567 	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10568 	return mode;
10569 
10570       /* We can also widen if the bits brought in will be masked off.  This
10571 	 operation is performed in ORIG_MODE.  */
10572       if (outer_code == AND)
10573 	{
10574 	  int care_bits = low_bitmask_len (orig_mode, outer_const);
10575 
10576 	  if (care_bits >= 0
10577 	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10578 	    return mode;
10579 	}
10580       /* fall through */
10581 
10582     case ROTATE:
10583       return orig_mode;
10584 
10585     case ROTATERT:
10586       gcc_unreachable ();
10587 
10588     default:
10589       return mode;
10590     }
10591 }
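
/* For example, (lshiftrt:QI X 3) can be widened to SImode when the result
   is known to be ANDed with 0x0f: low_bitmask_len gives 4 care bits and
   8 - 4 >= 3, so any bits shifted in from above QImode are masked away.
   An ASHIFTRT can only be widened when the bits above ORIG_MODE are
   already copies of its sign bit, per the test above.  */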
10592 
10593 /* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
10594    of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
10595    if we cannot simplify it.  Otherwise, return a simplified value.
10596 
10597    The shift is normally computed in the widest mode we find in VAROP, as
10598    long as it isn't a different number of words than RESULT_MODE.  Exceptions
10599    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
10600 
10601 static rtx
10602 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10603 			rtx varop, int orig_count)
10604 {
10605   enum rtx_code orig_code = code;
10606   rtx orig_varop = varop;
10607   int count, log2;
10608   machine_mode mode = result_mode;
10609   machine_mode shift_mode;
10610   scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10611   /* We form (outer_op (code varop count) (outer_const)).  */
10612   enum rtx_code outer_op = UNKNOWN;
10613   HOST_WIDE_INT outer_const = 0;
10614   int complement_p = 0;
10615   rtx new_rtx, x;
10616 
10617   /* Make sure to truncate the "natural" shift on the way in.  We don't
10618      want to do this inside the loop as it makes it more difficult to
10619      combine shifts.  */
10620   if (SHIFT_COUNT_TRUNCATED)
10621     orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10622 
10623   /* If we were given an invalid count, don't do anything except exactly
10624      what was requested.  */
10625 
10626   if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10627     return NULL_RTX;
10628 
10629   count = orig_count;
10630 
10631   /* Unless one of the branches of the `switch' in this loop does a
10632      `continue', we will `break' the loop after the `switch'.  */
10633 
10634   while (count != 0)
10635     {
10636       /* If we have an operand of (clobber (const_int 0)), fail.  */
10637       if (GET_CODE (varop) == CLOBBER)
10638 	return NULL_RTX;
10639 
10640       /* Convert ROTATERT to ROTATE.  */
10641       if (code == ROTATERT)
10642 	{
10643 	  unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10644 	  code = ROTATE;
10645 	  count = bitsize - count;
10646 	}
10647 
10648       shift_mode = result_mode;
10649       if (shift_mode != mode)
10650 	{
10651 	  /* We only change the modes of scalar shifts.  */
10652 	  int_mode = as_a <scalar_int_mode> (mode);
10653 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
10654 	  shift_mode = try_widen_shift_mode (code, varop, count,
10655 					     int_result_mode, int_mode,
10656 					     outer_op, outer_const);
10657 	}
10658 
10659       scalar_int_mode shift_unit_mode
10660 	= as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10661 
10662       /* Handle cases where the count is greater than the size of the mode
10663 	 minus 1.  For ASHIFTRT, use the size minus one as the count (this can
10664 	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
10665 	 take the count modulo the size.  For other shifts, the result is
10666 	 zero.
10667 
10668 	 Since these shifts are being produced by the compiler by combining
10669 	 multiple operations, each of which are defined, we know what the
10670 	 result is supposed to be.  */
10671 
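      /* For example, in 32-bit SImode a count of 40 becomes 31 for
	 ASHIFTRT (the result is all sign bits), 40 % 32 == 8 for rotates,
	 and for other shifts VAROP simply becomes zero while any outer
	 operation is still applied.  */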
10672       if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10673 	{
10674 	  if (code == ASHIFTRT)
10675 	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10676 	  else if (code == ROTATE || code == ROTATERT)
10677 	    count %= GET_MODE_PRECISION (shift_unit_mode);
10678 	  else
10679 	    {
10680 	      /* We can't simply return zero because there may be an
10681 		 outer op.  */
10682 	      varop = const0_rtx;
10683 	      count = 0;
10684 	      break;
10685 	    }
10686 	}
10687 
10688       /* If we discovered we had to complement VAROP, leave.  Making a NOT
10689 	 here would cause an infinite loop.  */
10690       if (complement_p)
10691 	break;
10692 
10693       if (shift_mode == shift_unit_mode)
10694 	{
10695 	  /* An arithmetic right shift of a quantity known to be -1 or 0
10696 	     is a no-op.  */
10697 	  if (code == ASHIFTRT
10698 	      && (num_sign_bit_copies (varop, shift_unit_mode)
10699 		  == GET_MODE_PRECISION (shift_unit_mode)))
10700 	    {
10701 	      count = 0;
10702 	      break;
10703 	    }
10704 
10705 	  /* If we are doing an arithmetic right shift and discarding all but
10706 	     the sign bit copies, this is equivalent to doing a shift by the
10707 	     bitsize minus one.  Convert it into that shift because it will
10708 	     often allow other simplifications.  */
10709 
10710 	  if (code == ASHIFTRT
10711 	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
10712 		  >= GET_MODE_PRECISION (shift_unit_mode)))
10713 	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10714 
10715 	  /* We simplify the tests below and elsewhere by converting
10716 	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10717 	     `make_compound_operation' will convert it to an ASHIFTRT for
10718 	     those machines (such as VAX) that don't have an LSHIFTRT.  */
10719 	  if (code == ASHIFTRT
10720 	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10721 	      && val_signbit_known_clear_p (shift_unit_mode,
10722 					    nonzero_bits (varop,
10723 							  shift_unit_mode)))
10724 	    code = LSHIFTRT;
10725 
10726 	  if (((code == LSHIFTRT
10727 		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10728 		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
10729 	       || (code == ASHIFT
10730 		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10731 		   && !((nonzero_bits (varop, shift_unit_mode) << count)
10732 			& GET_MODE_MASK (shift_unit_mode))))
10733 	      && !side_effects_p (varop))
10734 	    varop = const0_rtx;
10735 	}
10736 
10737       switch (GET_CODE (varop))
10738 	{
10739 	case SIGN_EXTEND:
10740 	case ZERO_EXTEND:
10741 	case SIGN_EXTRACT:
10742 	case ZERO_EXTRACT:
10743 	  new_rtx = expand_compound_operation (varop);
10744 	  if (new_rtx != varop)
10745 	    {
10746 	      varop = new_rtx;
10747 	      continue;
10748 	    }
10749 	  break;
10750 
10751 	case MEM:
10752 	  /* The following rules apply only to scalars.  */
10753 	  if (shift_mode != shift_unit_mode)
10754 	    break;
10755 	  int_mode = as_a <scalar_int_mode> (mode);
10756 
10757 	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10758 	     minus the width of a smaller mode, we can do this with a
10759 	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
10760 	  if ((code == ASHIFTRT || code == LSHIFTRT)
10761 	      && ! mode_dependent_address_p (XEXP (varop, 0),
10762 					     MEM_ADDR_SPACE (varop))
10763 	      && ! MEM_VOLATILE_P (varop)
10764 	      && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10765 		  .exists (&tmode)))
10766 	    {
10767 	      new_rtx = adjust_address_nv (varop, tmode,
10768 					   BYTES_BIG_ENDIAN ? 0
10769 					   : count / BITS_PER_UNIT);
10770 
10771 	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10772 				     : ZERO_EXTEND, int_mode, new_rtx);
10773 	      count = 0;
10774 	      continue;
10775 	    }
10776 	  break;
10777 
10778 	case SUBREG:
10779 	  /* The following rules apply only to scalars.  */
10780 	  if (shift_mode != shift_unit_mode)
10781 	    break;
10782 	  int_mode = as_a <scalar_int_mode> (mode);
10783 	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10784 
10785 	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
10786 	     the same number of words as what we've seen so far.  Then store
10787 	     the widest mode in MODE.  */
10788 	  if (subreg_lowpart_p (varop)
10789 	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10790 	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10791 	      && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10792 		  == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10793 	      && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10794 	    {
10795 	      varop = SUBREG_REG (varop);
10796 	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10797 		mode = inner_mode;
10798 	      continue;
10799 	    }
10800 	  break;
10801 
10802 	case MULT:
10803 	  /* Some machines use MULT instead of ASHIFT because MULT
10804 	     is cheaper.  But it is still better on those machines to
10805 	     merge two shifts into one.  */
10806 	  if (CONST_INT_P (XEXP (varop, 1))
10807 	      && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10808 	    {
10809 	      rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10810 	      varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10811 					   XEXP (varop, 0), log2_rtx);
10812 	      continue;
10813 	    }
10814 	  break;
10815 
10816 	case UDIV:
10817 	  /* Similar, for when divides are cheaper.  */
10818 	  if (CONST_INT_P (XEXP (varop, 1))
10819 	      && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10820 	    {
10821 	      rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10822 	      varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10823 					   XEXP (varop, 0), log2_rtx);
10824 	      continue;
10825 	    }
10826 	  break;
10827 
10828 	case ASHIFTRT:
10829 	  /* If we are extracting just the sign bit of an arithmetic
10830 	     right shift, that shift is not needed.  However, the sign
10831 	     bit of a wider mode may be different from what would be
10832 	     interpreted as the sign bit in a narrower mode, so, if
10833 	     the result is narrower, don't discard the shift.  */
10834 	  if (code == LSHIFTRT
10835 	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10836 	      && (GET_MODE_UNIT_BITSIZE (result_mode)
10837 		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10838 	    {
10839 	      varop = XEXP (varop, 0);
10840 	      continue;
10841 	    }
10842 
10843 	  /* fall through */
10844 
10845 	case LSHIFTRT:
10846 	case ASHIFT:
10847 	case ROTATE:
10848 	  /* The following rules apply only to scalars.  */
10849 	  if (shift_mode != shift_unit_mode)
10850 	    break;
10851 	  int_mode = as_a <scalar_int_mode> (mode);
10852 	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10853 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
10854 
10855 	  /* Here we have two nested shifts.  The result is usually the
10856 	     AND of a new shift with a mask.  We compute the result below.  */
10857 	  if (CONST_INT_P (XEXP (varop, 1))
10858 	      && INTVAL (XEXP (varop, 1)) >= 0
10859 	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10860 	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
10861 	      && HWI_COMPUTABLE_MODE_P (int_mode))
10862 	    {
10863 	      enum rtx_code first_code = GET_CODE (varop);
10864 	      unsigned int first_count = INTVAL (XEXP (varop, 1));
10865 	      unsigned HOST_WIDE_INT mask;
10866 	      rtx mask_rtx;
10867 
10868 	      /* We have one common special case.  We can't do any merging if
10869 		 the inner code is an ASHIFTRT of a smaller mode.  However, if
10870 		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10871 		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10872 		 we can convert it to
10873 		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10874 		 This simplifies certain SIGN_EXTEND operations.  */
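	      /* For example, with M1 == SImode (32 bits), M2 == QImode
		 (8 bits), C1 == 2 and C2 == 24, this turns
		 (ashift:SI (subreg:SI (ashiftrt:QI FOO 2) 0) 24) into
		 (ashiftrt:SI (ashift:SI (and:SI (subreg:SI FOO 0)
		 0xfffffffc) 24) 2).  */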
10875 	      if (code == ASHIFT && first_code == ASHIFTRT
10876 		  && count == (GET_MODE_PRECISION (int_result_mode)
10877 			       - GET_MODE_PRECISION (int_varop_mode)))
10878 		{
10879 		  /* C3 has the low-order C1 bits zero.  */
10880 
10881 		  mask = GET_MODE_MASK (int_mode)
10882 			 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10883 
10884 		  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10885 						  XEXP (varop, 0), mask);
10886 		  varop = simplify_shift_const (NULL_RTX, ASHIFT,
10887 						int_result_mode, varop, count);
10888 		  count = first_count;
10889 		  code = ASHIFTRT;
10890 		  continue;
10891 		}
10892 
10893 	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10894 		 than C1 high-order bits equal to the sign bit, we can convert
10895 		 this to either an ASHIFT or an ASHIFTRT depending on the
10896 		 two counts.
10897 
10898 		 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */
10899 
10900 	      if (code == ASHIFTRT && first_code == ASHIFT
10901 		  && int_varop_mode == shift_unit_mode
10902 		  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10903 		      > first_count))
10904 		{
10905 		  varop = XEXP (varop, 0);
10906 		  count -= first_count;
10907 		  if (count < 0)
10908 		    {
10909 		      count = -count;
10910 		      code = ASHIFT;
10911 		    }
10912 
10913 		  continue;
10914 		}
10915 
10916 	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
10917 		 we can only do this if FIRST_CODE is also ASHIFTRT.
10918 
10919 		 We can't do the case when CODE is ROTATE and FIRST_CODE is
10920 		 ASHIFTRT.
10921 
10922 		 If the mode of this shift is not the mode of the outer shift,
10923 		 we can't do this if either shift is a right shift or ROTATE.
10924 
10925 		 Finally, we can't do any of these if the mode is too wide
10926 		 unless the codes are the same.
10927 
10928 		 Handle the case where the shift codes are the same
10929 		 first.  */
10930 
10931 	      if (code == first_code)
10932 		{
10933 		  if (int_varop_mode != int_result_mode
10934 		      && (code == ASHIFTRT || code == LSHIFTRT
10935 			  || code == ROTATE))
10936 		    break;
10937 
10938 		  count += first_count;
10939 		  varop = XEXP (varop, 0);
10940 		  continue;
10941 		}
10942 
10943 	      if (code == ASHIFTRT
10944 		  || (code == ROTATE && first_code == ASHIFTRT)
10945 		  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10946 		  || (int_varop_mode != int_result_mode
10947 		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
10948 			  || first_code == ROTATE
10949 			  || code == ROTATE)))
10950 		break;
10951 
10952 	      /* To compute the mask to apply after the shift, shift the
10953 		 nonzero bits of the inner shift the same way the
10954 		 outer shift will.  */
10955 
10956 	      mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10957 				       int_result_mode);
10958 	      rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10959 	      mask_rtx
10960 		= simplify_const_binary_operation (code, int_result_mode,
10961 						   mask_rtx, count_rtx);
10962 
10963 	      /* Give up if we can't compute an outer operation to use.  */
10964 	      if (mask_rtx == 0
10965 		  || !CONST_INT_P (mask_rtx)
10966 		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
10967 					INTVAL (mask_rtx),
10968 					int_result_mode, &complement_p))
10969 		break;
10970 
10971 	      /* If the shifts are in the same direction, we add the
10972 		 counts.  Otherwise, we subtract them.  */
10973 	      if ((code == ASHIFTRT || code == LSHIFTRT)
10974 		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10975 		count += first_count;
10976 	      else
10977 		count -= first_count;
10978 
10979 	      /* If COUNT is positive, the new shift is usually CODE,
10980 		 except for the two exceptions below, in which case it is
10981 		 FIRST_CODE.  If the count is negative, FIRST_CODE should
10982 		 always be used.  */
10983 	      if (count > 0
10984 		  && ((first_code == ROTATE && code == ASHIFT)
10985 		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
10986 		code = first_code;
10987 	      else if (count < 0)
10988 		code = first_code, count = -count;
10989 
10990 	      varop = XEXP (varop, 0);
10991 	      continue;
10992 	    }
10993 
10994 	  /* If we have (A << B << C) for any shift, we can convert this to
10995 	     (A << C << B).  This wins if A is a constant.  Only try this if
10996 	     B is not a constant.  */
10997 
10998 	  else if (GET_CODE (varop) == code
10999 		   && CONST_INT_P (XEXP (varop, 0))
11000 		   && !CONST_INT_P (XEXP (varop, 1)))
11001 	    {
11002 	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
11003 		 sure the result will be masked.  See PR70222.  */
11004 	      if (code == LSHIFTRT
11005 		  && int_mode != int_result_mode
11006 		  && !merge_outer_ops (&outer_op, &outer_const, AND,
11007 				       GET_MODE_MASK (int_result_mode)
11008 				       >> orig_count, int_result_mode,
11009 				       &complement_p))
11010 		break;
11011 	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
11012 		 up outer sign extension (often left and right shift) is
11013 		 hardly more efficient than the original.  See PR70429.  */
11014 	      if (code == ASHIFTRT && int_mode != int_result_mode)
11015 		break;
11016 
11017 	      rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
11018 	      rtx new_rtx = simplify_const_binary_operation (code, int_mode,
11019 							     XEXP (varop, 0),
11020 							     count_rtx);
11021 	      varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
11022 	      count = 0;
11023 	      continue;
11024 	    }
11025 	  break;
11026 
11027 	case NOT:
11028 	  /* The following rules apply only to scalars.  */
11029 	  if (shift_mode != shift_unit_mode)
11030 	    break;
11031 
11032 	  /* Make this fit the case below.  */
11033 	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
11034 	  continue;
11035 
11036 	case IOR:
11037 	case AND:
11038 	case XOR:
11039 	  /* The following rules apply only to scalars.  */
11040 	  if (shift_mode != shift_unit_mode)
11041 	    break;
11042 	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11043 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11044 
11045 	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
11046 	     with C the size of VAROP - 1 and the shift is logical if
11047 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11048 	     we have an (le X 0) operation.  If we have an arithmetic shift
11049 	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
11050 	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */
11051 
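	  /* For example, with STORE_FLAG_VALUE == 1 and a 32-bit X,
	     (lshiftrt:SI (ior:SI (plus:SI X (const_int -1)) X) (const_int 31))
	     yields 1 exactly when X is zero or negative, since the sign bit
	     of (X - 1) | X is set only in those cases; hence (le X 0).  */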
11052 	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
11053 	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
11054 	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11055 	      && (code == LSHIFTRT || code == ASHIFTRT)
11056 	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11057 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11058 	    {
11059 	      count = 0;
11060 	      varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
11061 				  const0_rtx);
11062 
11063 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11064 		varop = gen_rtx_NEG (int_varop_mode, varop);
11065 
11066 	      continue;
11067 	    }
11068 
11069 	  /* If we have (shift (logical)), move the logical to the outside
11070 	     to allow it to possibly combine with another logical and the
11071 	     shift to combine with another shift.  This also canonicalizes to
11072 	     what a ZERO_EXTRACT looks like.  Also, some machines have
11073 	     (and (shift)) insns.  */
11074 
11075 	  if (CONST_INT_P (XEXP (varop, 1))
11076 	      /* We can't do this if we have (ashiftrt (xor))  and the
11077 		 constant has its sign bit set in shift_unit_mode with
11078 		 shift_unit_mode wider than result_mode.  */
11079 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11080 		   && int_result_mode != shift_unit_mode
11081 		   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11082 					  shift_unit_mode) < 0)
11083 	      && (new_rtx = simplify_const_binary_operation
11084 		  (code, int_result_mode,
11085 		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11086 		   gen_int_shift_amount (int_result_mode, count))) != 0
11087 	      && CONST_INT_P (new_rtx)
11088 	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
11089 				  INTVAL (new_rtx), int_result_mode,
11090 				  &complement_p))
11091 	    {
11092 	      varop = XEXP (varop, 0);
11093 	      continue;
11094 	    }
11095 
11096 	  /* If we can't do that, try to simplify the shift in each arm of the
11097 	     logical expression, make a new logical expression, and apply
11098 	     the inverse distributive law.  This also can't be done for
11099 	     (ashiftrt (xor)) where we've widened the shift and the constant
11100 	     changes the sign bit.  */
11101 	  if (CONST_INT_P (XEXP (varop, 1))
11102 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11103 		   && int_result_mode != shift_unit_mode
11104 		   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11105 					  shift_unit_mode) < 0))
11106 	    {
11107 	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11108 					      XEXP (varop, 0), count);
11109 	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11110 					      XEXP (varop, 1), count);
11111 
11112 	      varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11113 					   lhs, rhs);
11114 	      varop = apply_distributive_law (varop);
11115 
11116 	      count = 0;
11117 	      continue;
11118 	    }
11119 	  break;
11120 
11121 	case EQ:
11122 	  /* The following rules apply only to scalars.  */
11123 	  if (shift_mode != shift_unit_mode)
11124 	    break;
11125 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11126 
11127 	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11128 	     says that the sign bit can be tested, FOO has mode MODE, C is
11129 	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11130 	     that may be nonzero.  */
11131 	  if (code == LSHIFTRT
11132 	      && XEXP (varop, 1) == const0_rtx
11133 	      && GET_MODE (XEXP (varop, 0)) == int_result_mode
11134 	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11135 	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
11136 	      && STORE_FLAG_VALUE == -1
11137 	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11138 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11139 				  int_result_mode, &complement_p))
11140 	    {
11141 	      varop = XEXP (varop, 0);
11142 	      count = 0;
11143 	      continue;
11144 	    }
11145 	  break;
11146 
11147 	case NEG:
11148 	  /* The following rules apply only to scalars.  */
11149 	  if (shift_mode != shift_unit_mode)
11150 	    break;
11151 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11152 
11153 	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11154 	     than the number of bits in the mode is equivalent to A.  */
11155 	  if (code == LSHIFTRT
11156 	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11157 	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11158 	    {
11159 	      varop = XEXP (varop, 0);
11160 	      count = 0;
11161 	      continue;
11162 	    }
11163 
11164 	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
11165 	     NEG outside to allow shifts to combine.  */
11166 	  if (code == ASHIFT
11167 	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11168 				  int_result_mode, &complement_p))
11169 	    {
11170 	      varop = XEXP (varop, 0);
11171 	      continue;
11172 	    }
11173 	  break;
11174 
11175 	case PLUS:
11176 	  /* The following rules apply only to scalars.  */
11177 	  if (shift_mode != shift_unit_mode)
11178 	    break;
11179 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11180 
11181 	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11182 	     is one less than the number of bits in the mode is
11183 	     equivalent to (xor A 1).  */
11184 	  if (code == LSHIFTRT
11185 	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11186 	      && XEXP (varop, 1) == constm1_rtx
11187 	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11188 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11189 				  int_result_mode, &complement_p))
11190 	    {
11191 	      count = 0;
11192 	      varop = XEXP (varop, 0);
11193 	      continue;
11194 	    }
11195 
11196 	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11197 	     that might be nonzero in BAR are those being shifted out and those
11198 	     bits are known zero in FOO, we can replace the PLUS with FOO.
11199 	     Similarly in the other operand order.  This code occurs when
11200 	     we are computing the size of a variable-size array.  */
11201 
11202 	  if ((code == ASHIFTRT || code == LSHIFTRT)
11203 	      && count < HOST_BITS_PER_WIDE_INT
11204 	      && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11205 	      && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11206 		  & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11207 	    {
11208 	      varop = XEXP (varop, 0);
11209 	      continue;
11210 	    }
11211 	  else if ((code == ASHIFTRT || code == LSHIFTRT)
11212 		   && count < HOST_BITS_PER_WIDE_INT
11213 		   && HWI_COMPUTABLE_MODE_P (int_result_mode)
11214 		   && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11215 		       >> count) == 0
11216 		   && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11217 		       & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11218 	    {
11219 	      varop = XEXP (varop, 1);
11220 	      continue;
11221 	    }
11222 
11223 	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
11224 	  if (code == ASHIFT
11225 	      && CONST_INT_P (XEXP (varop, 1))
11226 	      && (new_rtx = simplify_const_binary_operation
11227 		  (ASHIFT, int_result_mode,
11228 		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11229 		   gen_int_shift_amount (int_result_mode, count))) != 0
11230 	      && CONST_INT_P (new_rtx)
11231 	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
11232 				  INTVAL (new_rtx), int_result_mode,
11233 				  &complement_p))
11234 	    {
11235 	      varop = XEXP (varop, 0);
11236 	      continue;
11237 	    }
11238 
11239 	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11240 	     signbit', and attempt to change the PLUS to an XOR and move it
11241 	     to the outer operation, as is done above for (shift (logical))
11242 	     in the AND/IOR/XOR case.  See the logical handling above for
11243 	     the reasoning.  */
11244 	  if (code == LSHIFTRT
11245 	      && CONST_INT_P (XEXP (varop, 1))
11246 	      && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11247 	      && (new_rtx = simplify_const_binary_operation
11248 		  (code, int_result_mode,
11249 		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11250 		   gen_int_shift_amount (int_result_mode, count))) != 0
11251 	      && CONST_INT_P (new_rtx)
11252 	      && merge_outer_ops (&outer_op, &outer_const, XOR,
11253 				  INTVAL (new_rtx), int_result_mode,
11254 				  &complement_p))
11255 	    {
11256 	      varop = XEXP (varop, 0);
11257 	      continue;
11258 	    }
11259 
11260 	  break;
11261 
11262 	case MINUS:
11263 	  /* The following rules apply only to scalars.  */
11264 	  if (shift_mode != shift_unit_mode)
11265 	    break;
11266 	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11267 
11268 	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11269 	     with C the size of VAROP - 1 and the shift is logical if
11270 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11271 	     we have a (gt X 0) operation.  If the shift is arithmetic with
11272 	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11273 	     we have a (neg (gt X 0)) operation.  */
11274 
11275 	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11276 	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11277 	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11278 	      && (code == LSHIFTRT || code == ASHIFTRT)
11279 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11280 	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11281 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11282 	    {
11283 	      count = 0;
11284 	      varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11285 				  const0_rtx);
11286 
11287 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11288 		varop = gen_rtx_NEG (int_varop_mode, varop);
11289 
11290 	      continue;
11291 	    }
11292 	  break;
11293 
11294 	case TRUNCATE:
11295 	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11296 	     if the truncate does not affect the value.  */
11297 	  if (code == LSHIFTRT
11298 	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11299 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11300 	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
11301 		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11302 		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11303 	    {
11304 	      rtx varop_inner = XEXP (varop, 0);
11305 	      int new_count = count + INTVAL (XEXP (varop_inner, 1));
11306 	      rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11307 							new_count);
11308 	      varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11309 					      XEXP (varop_inner, 0),
11310 					      new_count_rtx);
11311 	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11312 	      count = 0;
11313 	      continue;
11314 	    }
11315 	  break;
11316 
11317 	default:
11318 	  break;
11319 	}
11320 
11321       break;
11322     }
11323 
11324   shift_mode = result_mode;
11325   if (shift_mode != mode)
11326     {
11327       /* We only change the modes of scalar shifts.  */
11328       int_mode = as_a <scalar_int_mode> (mode);
11329       int_result_mode = as_a <scalar_int_mode> (result_mode);
11330       shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11331 					 int_mode, outer_op, outer_const);
11332     }
11333 
11334   /* We have now finished analyzing the shift.  The result should be
11335      a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
11336      OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11337      to the result of the shift.  OUTER_CONST is the relevant constant,
11338      but we must turn off all bits turned off in the shift.  */
11339 
11340   if (outer_op == UNKNOWN
11341       && orig_code == code && orig_count == count
11342       && varop == orig_varop
11343       && shift_mode == GET_MODE (varop))
11344     return NULL_RTX;
11345 
11346   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
11347   varop = gen_lowpart (shift_mode, varop);
11348   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11349     return NULL_RTX;
11350 
11351   /* If we have an outer operation and we just made a shift, it is
11352      possible that we could have simplified the shift were it not
11353      for the outer operation.  So try to do the simplification
11354      recursively.  */
11355 
11356   if (outer_op != UNKNOWN)
11357     x = simplify_shift_const_1 (code, shift_mode, varop, count);
11358   else
11359     x = NULL_RTX;
11360 
11361   if (x == NULL_RTX)
11362     x = simplify_gen_binary (code, shift_mode, varop,
11363 			     gen_int_shift_amount (shift_mode, count));
11364 
11365   /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11366      turn off all the bits that the shift would have turned off.  */
11367   if (orig_code == LSHIFTRT && result_mode != shift_mode)
11368     /* We only change the modes of scalar shifts.  */
11369     x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11370 				x, GET_MODE_MASK (result_mode) >> orig_count);
11371 
11372   /* Do the remainder of the processing in RESULT_MODE.  */
11373   x = gen_lowpart_or_truncate (result_mode, x);
11374 
11375   /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11376      operation.  */
11377   if (complement_p)
11378     x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11379 
11380   if (outer_op != UNKNOWN)
11381     {
11382       int_result_mode = as_a <scalar_int_mode> (result_mode);
11383 
11384       if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11385 	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11386 	outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11387 
11388       if (outer_op == AND)
11389 	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11390       else if (outer_op == SET)
11391 	{
11392 	  /* This means that we have determined that the result is
11393 	     equivalent to a constant.  This should be rare.  */
11394 	  if (!side_effects_p (x))
11395 	    x = GEN_INT (outer_const);
11396 	}
11397       else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11398 	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11399       else
11400 	x = simplify_gen_binary (outer_op, int_result_mode, x,
11401 				 GEN_INT (outer_const));
11402     }
11403 
11404   return x;
11405 }
11406 
11407 /* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
11408    The result of the shift is RESULT_MODE.  If we cannot simplify it,
11409    return X or, if it is NULL, synthesize the expression with
11410    simplify_gen_binary.  Otherwise, return a simplified value.
11411 
11412    The shift is normally computed in the widest mode we find in VAROP, as
11413    long as it isn't a different number of words than RESULT_MODE.  Exceptions
11414    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
11415 
11416 static rtx
11417 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11418 		      rtx varop, int count)
11419 {
11420   rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11421   if (tem)
11422     return tem;
11423 
11424   if (!x)
11425     x = simplify_gen_binary (code, GET_MODE (varop), varop,
11426 			     gen_int_shift_amount (GET_MODE (varop), count));
11427   if (GET_MODE (x) != result_mode)
11428     x = gen_lowpart (result_mode, x);
11429   return x;
11430 }
11431 
11432 
11433 /* A subroutine of recog_for_combine.  See there for arguments and
11434    return value.  */
11435 
11436 static int
11437 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11438 {
11439   rtx pat = *pnewpat;
11440   rtx pat_without_clobbers;
11441   int insn_code_number;
11442   int num_clobbers_to_add = 0;
11443   int i;
11444   rtx notes = NULL_RTX;
11445   rtx old_notes, old_pat;
11446   int old_icode;
11447 
11448   /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11449      we use to indicate that something didn't match.  If we find such a
11450      thing, force rejection.  */
11451   if (GET_CODE (pat) == PARALLEL)
11452     for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11453       if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11454 	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11455 	return -1;
11456 
11457   old_pat = PATTERN (insn);
11458   old_notes = REG_NOTES (insn);
11459   PATTERN (insn) = pat;
11460   REG_NOTES (insn) = NULL_RTX;
11461 
11462   insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11463   if (dump_file && (dump_flags & TDF_DETAILS))
11464     {
11465       if (insn_code_number < 0)
11466 	fputs ("Failed to match this instruction:\n", dump_file);
11467       else
11468 	fputs ("Successfully matched this instruction:\n", dump_file);
11469       print_rtl_single (dump_file, pat);
11470     }
11471 
11472   /* If the pattern isn't recognized, we may previously have had an insn
11473      that clobbered some register as a side effect, but the combined
11474      insn doesn't need to do that.  So try once more without the clobbers
11475      unless this represents an ASM insn.  */
11476 
11477   if (insn_code_number < 0 && ! check_asm_operands (pat)
11478       && GET_CODE (pat) == PARALLEL)
11479     {
11480       int pos;
11481 
11482       for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11483 	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11484 	  {
11485 	    if (i != pos)
11486 	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11487 	    pos++;
11488 	  }
11489 
11490       SUBST_INT (XVECLEN (pat, 0), pos);
11491 
11492       if (pos == 1)
11493 	pat = XVECEXP (pat, 0, 0);
11494 
11495       PATTERN (insn) = pat;
11496       insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11497       if (dump_file && (dump_flags & TDF_DETAILS))
11498 	{
11499 	  if (insn_code_number < 0)
11500 	    fputs ("Failed to match this instruction:\n", dump_file);
11501 	  else
11502 	    fputs ("Successfully matched this instruction:\n", dump_file);
11503 	  print_rtl_single (dump_file, pat);
11504 	}
11505     }
11506 
11507   pat_without_clobbers = pat;
11508 
11509   PATTERN (insn) = old_pat;
11510   REG_NOTES (insn) = old_notes;
11511 
11512   /* Recognize all noop sets; these will be killed by a followup pass.  */
11513   if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11514     insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11515 
11516   /* If we had any clobbers to add, make a new pattern that contains
11517      them.  Then check to make sure that all of them are dead.  */
11518   if (num_clobbers_to_add)
11519     {
11520       rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11521 				     rtvec_alloc (GET_CODE (pat) == PARALLEL
11522 						  ? (XVECLEN (pat, 0)
11523 						     + num_clobbers_to_add)
11524 						  : num_clobbers_to_add + 1));
11525 
11526       if (GET_CODE (pat) == PARALLEL)
11527 	for (i = 0; i < XVECLEN (pat, 0); i++)
11528 	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11529       else
11530 	XVECEXP (newpat, 0, 0) = pat;
11531 
11532       add_clobbers (newpat, insn_code_number);
11533 
11534       for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11535 	   i < XVECLEN (newpat, 0); i++)
11536 	{
11537 	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11538 	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11539 	    return -1;
11540 	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11541 	    {
11542 	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11543 	      notes = alloc_reg_note (REG_UNUSED,
11544 				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
11545 	    }
11546 	}
11547       pat = newpat;
11548     }
11549 
11550   if (insn_code_number >= 0
11551       && insn_code_number != NOOP_MOVE_INSN_CODE)
11552     {
11553       old_pat = PATTERN (insn);
11554       old_notes = REG_NOTES (insn);
11555       old_icode = INSN_CODE (insn);
11556       PATTERN (insn) = pat;
11557       REG_NOTES (insn) = notes;
11558       INSN_CODE (insn) = insn_code_number;
11559 
11560       /* Allow targets to reject combined insn.  */
11561       if (!targetm.legitimate_combined_insn (insn))
11562 	{
11563 	  if (dump_file && (dump_flags & TDF_DETAILS))
11564 	    fputs ("Instruction not appropriate for target.",
11565 		   dump_file);
11566 
11567 	  /* Callers expect recog_for_combine to strip
11568 	     clobbers from the pattern on failure.  */
11569 	  pat = pat_without_clobbers;
11570 	  notes = NULL_RTX;
11571 
11572 	  insn_code_number = -1;
11573 	}
11574 
11575       PATTERN (insn) = old_pat;
11576       REG_NOTES (insn) = old_notes;
11577       INSN_CODE (insn) = old_icode;
11578     }
11579 
11580   *pnewpat = pat;
11581   *pnotes = notes;
11582 
11583   return insn_code_number;
11584 }
11585 
11586 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11587    expressed as an AND and maybe an LSHIFTRT, to that formulation.
11588    Return whether anything was so changed.  */
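/* On a little-endian target, for example,
   (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4)) becomes
   (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4)) (const_int 255)),
   and (zero_extend:SI (subreg:QI (reg:SI 100) 0)) becomes
   (and:SI (reg:SI 100) (const_int 255)).  */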
11589 
11590 static bool
11591 change_zero_ext (rtx pat)
11592 {
11593   bool changed = false;
11594   rtx *src = &SET_SRC (pat);
11595 
11596   subrtx_ptr_iterator::array_type array;
11597   FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11598     {
11599       rtx x = **iter;
11600       scalar_int_mode mode, inner_mode;
11601       if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11602 	continue;
11603       int size;
11604 
11605       if (GET_CODE (x) == ZERO_EXTRACT
11606 	  && CONST_INT_P (XEXP (x, 1))
11607 	  && CONST_INT_P (XEXP (x, 2))
11608 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11609 	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11610 	{
11611 	  size = INTVAL (XEXP (x, 1));
11612 
11613 	  int start = INTVAL (XEXP (x, 2));
11614 	  if (BITS_BIG_ENDIAN)
11615 	    start = GET_MODE_PRECISION (inner_mode) - size - start;
11616 
11617 	  if (start != 0)
11618 	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11619 				  gen_int_shift_amount (inner_mode, start));
11620 	  else
11621 	    x = XEXP (x, 0);
11622 
11623 	  if (mode != inner_mode)
11624 	    {
11625 	      if (REG_P (x) && HARD_REGISTER_P (x)
11626 		  && !can_change_dest_mode (x, 0, mode))
11627 		continue;
11628 
11629 	      x = gen_lowpart_SUBREG (mode, x);
11630 	    }
11631 	}
11632       else if (GET_CODE (x) == ZERO_EXTEND
11633 	       && GET_CODE (XEXP (x, 0)) == SUBREG
11634 	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11635 	       && !paradoxical_subreg_p (XEXP (x, 0))
11636 	       && subreg_lowpart_p (XEXP (x, 0)))
11637 	{
11638 	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11639 	  size = GET_MODE_PRECISION (inner_mode);
11640 	  x = SUBREG_REG (XEXP (x, 0));
11641 	  if (GET_MODE (x) != mode)
11642 	    {
11643 	      if (REG_P (x) && HARD_REGISTER_P (x)
11644 		  && !can_change_dest_mode (x, 0, mode))
11645 		continue;
11646 
11647 	      x = gen_lowpart_SUBREG (mode, x);
11648 	    }
11649 	}
11650       else if (GET_CODE (x) == ZERO_EXTEND
11651 	       && REG_P (XEXP (x, 0))
11652 	       && HARD_REGISTER_P (XEXP (x, 0))
11653 	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
11654 	{
11655 	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11656 	  size = GET_MODE_PRECISION (inner_mode);
11657 	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11658 	}
11659       else
11660 	continue;
11661 
11662       if (!(GET_CODE (x) == LSHIFTRT
11663 	    && CONST_INT_P (XEXP (x, 1))
11664 	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11665 	{
11666 	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11667 	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11668 	}
11669 
11670       SUBST (**iter, x);
11671       changed = true;
11672     }
11673 
11674   if (changed)
11675     FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11676       maybe_swap_commutative_operands (**iter);
11677 
11678   rtx *dst = &SET_DEST (pat);
11679   scalar_int_mode mode;
11680   if (GET_CODE (*dst) == ZERO_EXTRACT
11681       && REG_P (XEXP (*dst, 0))
11682       && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11683       && CONST_INT_P (XEXP (*dst, 1))
11684       && CONST_INT_P (XEXP (*dst, 2)))
11685     {
11686       rtx reg = XEXP (*dst, 0);
11687       int width = INTVAL (XEXP (*dst, 1));
11688       int offset = INTVAL (XEXP (*dst, 2));
11689       int reg_width = GET_MODE_PRECISION (mode);
11690       if (BITS_BIG_ENDIAN)
11691 	offset = reg_width - width - offset;
11692 
11693       rtx x, y, z, w;
11694       wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11695       wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11696       x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11697       if (offset)
11698 	y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11699       else
11700 	y = SET_SRC (pat);
11701       z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11702       w = gen_rtx_IOR (mode, x, z);
11703       SUBST (SET_DEST (pat), reg);
11704       SUBST (SET_SRC (pat), w);
11705 
11706       changed = true;
11707     }
11708 
11709   return changed;
11710 }
11711 
11712 /* Like recog, but we receive the address of a pointer to a new pattern.
11713    We try to match the rtx that the pointer points to.
11714    If that fails, we may try to modify or replace the pattern,
11715    storing the replacement into the same pointer object.
11716 
11717    Modifications include deletion or addition of CLOBBERs.  If the
11718    instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11719    to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11720    (and undo if that fails).
11721 
11722    PNOTES is a pointer to a location where any REG_UNUSED notes added for
11723    the CLOBBERs are placed.
11724 
11725    The value is the final insn code from the pattern ultimately matched,
11726    or -1.  */
11727 
11728 static int
11729 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11730 {
11731   rtx pat = *pnewpat;
11732   int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11733   if (insn_code_number >= 0 || check_asm_operands (pat))
11734     return insn_code_number;
11735 
11736   void *marker = get_undo_marker ();
11737   bool changed = false;
11738 
11739   if (GET_CODE (pat) == SET)
11740     changed = change_zero_ext (pat);
11741   else if (GET_CODE (pat) == PARALLEL)
11742     {
11743       int i;
11744       for (i = 0; i < XVECLEN (pat, 0); i++)
11745 	{
11746 	  rtx set = XVECEXP (pat, 0, i);
11747 	  if (GET_CODE (set) == SET)
11748 	    changed |= change_zero_ext (set);
11749 	}
11750     }
11751 
11752   if (changed)
11753     {
11754       insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11755 
11756       if (insn_code_number < 0)
11757 	undo_to_marker (marker);
11758     }
11759 
11760   return insn_code_number;
11761 }
11762 
11763 /* Like gen_lowpart_general but for use by combine.  In combine it
11764    is not possible to create any new pseudoregs.  However, it is
11765    safe to create invalid memory addresses, because combine will
11766    try to recognize them and all they will do is make the combine
11767    attempt fail.
11768 
11769    If for some reason this cannot do its job, an rtx
11770    (clobber (const_int 0)) is returned.
11771    An insn containing that will not be recognized.  */
11772 
11773 static rtx
11774 gen_lowpart_for_combine (machine_mode omode, rtx x)
11775 {
11776   machine_mode imode = GET_MODE (x);
11777   rtx result;
11778 
11779   if (omode == imode)
11780     return x;
11781 
11782   /* We can only support MODE being wider than a word if X is a
11783      constant integer or has a mode the same size.  */
11784   if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11785       && ! (CONST_SCALAR_INT_P (x)
11786 	    || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11787     goto fail;
11788 
11789   /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
11790      won't know what to do.  So we will strip off the SUBREG here and
11791      process normally.  */
11792   if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11793     {
11794       x = SUBREG_REG (x);
11795 
11796       /* In case we fall down into the address adjustments further
11797 	 below, update the known mode of X (IMODE), since we just
11798 	 stripped the SUBREG.  */
11799       imode = GET_MODE (x);
11800 
11801       if (imode == omode)
11802 	return x;
11803     }
11804 
11805   result = gen_lowpart_common (omode, x);
11806 
11807   if (result)
11808     return result;
11809 
11810   if (MEM_P (x))
11811     {
11812       /* Refuse to work on a volatile memory ref or one with a mode-dependent
11813 	 address.  */
11814       if (MEM_VOLATILE_P (x)
11815 	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11816 	goto fail;
11817 
11818       /* If we want to refer to something bigger than the original memref,
11819 	 generate a paradoxical subreg instead.  That will force a reload
11820 	 of the original memref X.  */
11821       if (paradoxical_subreg_p (omode, imode))
11822 	return gen_rtx_SUBREG (omode, x, 0);
11823 
11824       poly_int64 offset = byte_lowpart_offset (omode, imode);
11825       return adjust_address_nv (x, omode, offset);
11826     }
11827 
11828   /* If X is a comparison operator, rewrite it in a new mode.  This
11829      probably won't match, but may allow further simplifications.  */
11830   else if (COMPARISON_P (x))
11831     return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11832 
11833   /* If we couldn't simplify X any other way, just enclose it in a
11834      SUBREG.  Normally, this SUBREG won't match, but some patterns may
11835      include an explicit SUBREG or we may simplify it further in combine.  */
11836   else
11837     {
11838       rtx res;
11839 
11840       if (imode == VOIDmode)
11841 	{
11842 	  imode = int_mode_for_mode (omode).require ();
11843 	  x = gen_lowpart_common (imode, x);
11844 	  if (x == NULL)
11845 	    goto fail;
11846 	}
11847       res = lowpart_subreg (omode, x, imode);
11848       if (res)
11849 	return res;
11850     }
11851 
11852  fail:
11853   return gen_rtx_CLOBBER (omode, const0_rtx);
11854 }
11855 
11856 /* Try to simplify a comparison between OP0 and a constant OP1,
11857    where CODE is the comparison code that will be tested, into a
11858    (CODE OP0 const0_rtx) form.
11859 
11860    The result is a possibly different comparison code to use.
11861    *POP1 may be updated.  */
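
/* For instance, (lt OP0 (const_int 5)) is canonicalized to
   (le OP0 (const_int 4)), and (gtu OP0 (const_int 0)) to
   (ne OP0 (const_int 0)).  */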
11862 
11863 static enum rtx_code
11864 simplify_compare_const (enum rtx_code code, machine_mode mode,
11865 			rtx op0, rtx *pop1)
11866 {
11867   scalar_int_mode int_mode;
11868   HOST_WIDE_INT const_op = INTVAL (*pop1);
11869 
11870   /* Get the constant we are comparing against and turn off all bits
11871      not on in our mode.  */
11872   if (mode != VOIDmode)
11873     const_op = trunc_int_for_mode (const_op, mode);
11874 
11875   /* If we are comparing against a constant power of two and the value
11876      being compared can only have that single bit nonzero (e.g., it was
11877      `and'ed with that bit), we can replace this with a comparison
11878      with zero.  */
11879   if (const_op
11880       && (code == EQ || code == NE || code == GE || code == GEU
11881 	  || code == LT || code == LTU)
11882       && is_a <scalar_int_mode> (mode, &int_mode)
11883       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11884       && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11885       && (nonzero_bits (op0, int_mode)
11886 	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11887     {
11888       code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11889       const_op = 0;
11890     }
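
  /* For instance, if nonzero_bits shows that only bit 3 of OP0 can be set
     (e.g. OP0 is (and X (const_int 8))), then (eq OP0 (const_int 8))
     is turned into (ne OP0 (const_int 0)).  */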
11891 
11892   /* Similarly, if we are comparing a value known to be either -1 or
11893      0 with -1, change it to the opposite comparison against zero.  */
11894   if (const_op == -1
11895       && (code == EQ || code == NE || code == GT || code == LE
11896 	  || code == GEU || code == LTU)
11897       && is_a <scalar_int_mode> (mode, &int_mode)
11898       && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11899     {
11900       code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11901       const_op = 0;
11902     }
11903 
11904   /* Do some canonicalizations based on the comparison code.  We prefer
11905      comparisons against zero and then prefer equality comparisons.
11906      If we can reduce the size of a constant, we will do that too.  */
11907   switch (code)
11908     {
11909     case LT:
11910       /* < C is equivalent to <= (C - 1) */
11911       if (const_op > 0)
11912 	{
11913 	  const_op -= 1;
11914 	  code = LE;
11915 	  /* ... fall through to LE case below.  */
11916 	  gcc_fallthrough ();
11917 	}
11918       else
11919 	break;
11920 
11921     case LE:
11922       /* <= C is equivalent to < (C + 1); we do this for C < 0  */
11923       if (const_op < 0)
11924 	{
11925 	  const_op += 1;
11926 	  code = LT;
11927 	}
11928 
11929       /* If we are doing a <= 0 comparison on a value known to have
11930 	 a zero sign bit, we can replace this with == 0.  */
11931       else if (const_op == 0
11932 	       && is_a <scalar_int_mode> (mode, &int_mode)
11933 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11934 	       && (nonzero_bits (op0, int_mode)
11935 		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11936 	       == 0)
11937 	code = EQ;
11938       break;
11939 
11940     case GE:
11941       /* >= C is equivalent to > (C - 1).  */
11942       if (const_op > 0)
11943 	{
11944 	  const_op -= 1;
11945 	  code = GT;
11946 	  /* ... fall through to GT below.  */
11947 	  gcc_fallthrough ();
11948 	}
11949       else
11950 	break;
11951 
11952     case GT:
11953       /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
11954       if (const_op < 0)
11955 	{
11956 	  const_op += 1;
11957 	  code = GE;
11958 	}
11959 
11960       /* If we are doing a > 0 comparison on a value known to have
11961 	 a zero sign bit, we can replace this with != 0.  */
11962       else if (const_op == 0
11963 	       && is_a <scalar_int_mode> (mode, &int_mode)
11964 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11965 	       && (nonzero_bits (op0, int_mode)
11966 		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11967 	       == 0)
11968 	code = NE;
11969       break;
11970 
11971     case LTU:
11972       /* < C is equivalent to <= (C - 1).  */
11973       if (const_op > 0)
11974 	{
11975 	  const_op -= 1;
11976 	  code = LEU;
11977 	  /* ... fall through ...  */
11978 	  gcc_fallthrough ();
11979 	}
11980       /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
11981       else if (is_a <scalar_int_mode> (mode, &int_mode)
11982 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11983 	       && ((unsigned HOST_WIDE_INT) const_op
11984 		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11985 	{
11986 	  const_op = 0;
11987 	  code = GE;
11988 	  break;
11989 	}
11990       else
11991 	break;
11992 
11993     case LEU:
11994       /* unsigned <= 0 is equivalent to == 0 */
11995       if (const_op == 0)
11996 	code = EQ;
11997       /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
11998       else if (is_a <scalar_int_mode> (mode, &int_mode)
11999 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12000 	       && ((unsigned HOST_WIDE_INT) const_op
12001 		   == ((HOST_WIDE_INT_1U
12002 			<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
12003 	{
12004 	  const_op = 0;
12005 	  code = GE;
12006 	}
12007       break;
12008 
12009     case GEU:
12010       /* >= C is equivalent to > (C - 1).  */
12011       if (const_op > 1)
12012 	{
12013 	  const_op -= 1;
12014 	  code = GTU;
12015 	  /* ... fall through ...  */
12016 	  gcc_fallthrough ();
12017 	}
12018 
12019       /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
12020       else if (is_a <scalar_int_mode> (mode, &int_mode)
12021 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12022 	       && ((unsigned HOST_WIDE_INT) const_op
12023 		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
12024 	{
12025 	  const_op = 0;
12026 	  code = LT;
12027 	  break;
12028 	}
12029       else
12030 	break;
12031 
12032     case GTU:
12033       /* unsigned > 0 is equivalent to != 0 */
12034       if (const_op == 0)
12035 	code = NE;
12036       /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
12037       else if (is_a <scalar_int_mode> (mode, &int_mode)
12038 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12039 	       && ((unsigned HOST_WIDE_INT) const_op
12040 		   == (HOST_WIDE_INT_1U
12041 		       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
12042 	{
12043 	  const_op = 0;
12044 	  code = LT;
12045 	}
12046       break;
12047 
12048     default:
12049       break;
12050     }
12051 
12052   *pop1 = GEN_INT (const_op);
12053   return code;
12054 }
12055 
12056 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
12057    comparison code that will be tested.
12058 
12059    The result is a possibly different comparison code to use.  *POP0 and
12060    *POP1 may be updated.
12061 
12062    It is possible that we might detect that a comparison is either always
12063    true or always false.  However, we do not perform general constant
12064    folding in combine, so this knowledge isn't useful.  Such tautologies
12065    should have been detected earlier.  Hence we ignore all such cases.  */
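
/* For instance, (eq (neg X) (const_int -5)) is normally reduced to a
   comparison of X against (const_int 5) under EQ, and an equality test of
   (zero_extend:SI (reg:QI R)) against a small nonnegative constant may be
   narrowed to a QImode test of the register when a QImode compare
   instruction exists.  */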
12066 
12067 static enum rtx_code
12068 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
12069 {
12070   rtx op0 = *pop0;
12071   rtx op1 = *pop1;
12072   rtx tem, tem1;
12073   int i;
12074   scalar_int_mode mode, inner_mode, tmode;
12075   opt_scalar_int_mode tmode_iter;
12076 
12077   /* Try a few ways of applying the same transformation to both operands.  */
12078   while (1)
12079     {
12080       /* The test below this one won't handle SIGN_EXTENDs on these machines,
12081 	 so check specially.  */
12082       if (!WORD_REGISTER_OPERATIONS
12083 	  && code != GTU && code != GEU && code != LTU && code != LEU
12084 	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
12085 	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
12086 	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
12087 	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
12088 	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
12089 	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
12090 	  && (is_a <scalar_int_mode>
12091 	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
12092 	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
12093 	  && CONST_INT_P (XEXP (op0, 1))
12094 	  && XEXP (op0, 1) == XEXP (op1, 1)
12095 	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12096 	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
12097 	  && (INTVAL (XEXP (op0, 1))
12098 	      == (GET_MODE_PRECISION (mode)
12099 		  - GET_MODE_PRECISION (inner_mode))))
12100 	{
12101 	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
12102 	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
12103 	}
12104 
12105       /* If both operands are the same constant shift, see if we can ignore the
12106 	 shift.  We can if the shift is a rotate or if the bits shifted out of
12107 	 this shift are known to be zero for both inputs and if the type of
12108 	 comparison is compatible with the shift.  */
12109       if (GET_CODE (op0) == GET_CODE (op1)
12110 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12111 	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12112 	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12113 		  && (code != GT && code != LT && code != GE && code != LE))
12114 	      || (GET_CODE (op0) == ASHIFTRT
12115 		  && (code != GTU && code != LTU
12116 		      && code != GEU && code != LEU)))
12117 	  && CONST_INT_P (XEXP (op0, 1))
12118 	  && INTVAL (XEXP (op0, 1)) >= 0
12119 	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12120 	  && XEXP (op0, 1) == XEXP (op1, 1))
12121 	{
12122 	  machine_mode mode = GET_MODE (op0);
12123 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12124 	  int shift_count = INTVAL (XEXP (op0, 1));
12125 
12126 	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12127 	    mask &= (mask >> shift_count) << shift_count;
12128 	  else if (GET_CODE (op0) == ASHIFT)
12129 	    mask = (mask & (mask << shift_count)) >> shift_count;
12130 
12131 	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12132 	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12133 	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12134 	  else
12135 	    break;
12136 	}
12137 
12138       /* If both operands are AND's of a paradoxical SUBREG by constant, the
12139 	 SUBREGs are of the same mode, and, in both cases, the AND would
12140 	 be redundant if the comparison was done in the narrower mode,
12141 	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12142 	 and the operand's possibly nonzero bits are 0xffffff01; in that case
12143 	 if we only care about QImode, we don't need the AND).  This case
12144 	 occurs if the output mode of an scc insn is not SImode and
12145 	 STORE_FLAG_VALUE == 1 (e.g., the 386).
12146 
12147 	 Similarly, check for a case where the AND's are ZERO_EXTEND
12148 	 operations from some narrower mode even though a SUBREG is not
12149 	 present.  */
12150 
12151       else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12152 	       && CONST_INT_P (XEXP (op0, 1))
12153 	       && CONST_INT_P (XEXP (op1, 1)))
12154 	{
12155 	  rtx inner_op0 = XEXP (op0, 0);
12156 	  rtx inner_op1 = XEXP (op1, 0);
12157 	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12158 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12159 	  int changed = 0;
12160 
12161 	  if (paradoxical_subreg_p (inner_op0)
12162 	      && GET_CODE (inner_op1) == SUBREG
12163 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12164 	      && (GET_MODE (SUBREG_REG (inner_op0))
12165 		  == GET_MODE (SUBREG_REG (inner_op1)))
12166 	      && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12167 					GET_MODE (SUBREG_REG (inner_op0)))) == 0
12168 	      && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12169 					GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12170 	    {
12171 	      op0 = SUBREG_REG (inner_op0);
12172 	      op1 = SUBREG_REG (inner_op1);
12173 
12174 	      /* The resulting comparison is always unsigned since we masked
12175 		 off the original sign bit.  */
12176 	      code = unsigned_condition (code);
12177 
12178 	      changed = 1;
12179 	    }
12180 
12181 	  else if (c0 == c1)
12182 	    FOR_EACH_MODE_UNTIL (tmode,
12183 				 as_a <scalar_int_mode> (GET_MODE (op0)))
12184 	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12185 		{
12186 		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12187 		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12188 		  code = unsigned_condition (code);
12189 		  changed = 1;
12190 		  break;
12191 		}
12192 
12193 	  if (! changed)
12194 	    break;
12195 	}
12196 
12197       /* If both operands are NOT, we can strip off the outer operation
12198 	 and adjust the comparison code for swapped operands; similarly for
12199 	 NEG, except that this must be an equality comparison.  */
12200       else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12201 	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12202 		   && (code == EQ || code == NE)))
12203 	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12204 
12205       else
12206 	break;
12207     }
12208 
12209   /* If the first operand is a constant, swap the operands and adjust the
12210      comparison code appropriately, but don't do this if the second operand
12211      is already a constant integer.  */
12212   if (swap_commutative_operands_p (op0, op1))
12213     {
12214       std::swap (op0, op1);
12215       code = swap_condition (code);
12216     }
12217 
12218   /* We now enter a loop during which we will try to simplify the comparison.
12219      For the most part, we only are concerned with comparisons with zero,
12220      but some things may really be comparisons with zero but not start
12221      out looking that way.  */
12222 
12223   while (CONST_INT_P (op1))
12224     {
12225       machine_mode raw_mode = GET_MODE (op0);
12226       scalar_int_mode int_mode;
12227       int equality_comparison_p;
12228       int sign_bit_comparison_p;
12229       int unsigned_comparison_p;
12230       HOST_WIDE_INT const_op;
12231 
12232       /* We only want to handle integral modes.  This catches VOIDmode,
12233 	 CCmode, and the floating-point modes.  An exception is that we
12234 	 can handle VOIDmode if OP0 is a COMPARE or a comparison
12235 	 operation.  */
12236 
12237       if (GET_MODE_CLASS (raw_mode) != MODE_INT
12238 	  && ! (raw_mode == VOIDmode
12239 		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12240 	break;
12241 
12242       /* Try to simplify the compare to constant, possibly changing the
12243 	 comparison op, and/or changing op1 to zero.  */
12244       code = simplify_compare_const (code, raw_mode, op0, &op1);
12245       const_op = INTVAL (op1);
12246 
12247       /* Compute some predicates to simplify code below.  */
12248 
12249       equality_comparison_p = (code == EQ || code == NE);
12250       sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12251       unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12252 			       || code == GEU);
12253 
12254       /* If this is a sign bit comparison and we can do arithmetic in
12255 	 MODE, say that we will only be needing the sign bit of OP0.  */
12256       if (sign_bit_comparison_p
12257 	  && is_a <scalar_int_mode> (raw_mode, &int_mode)
12258 	  && HWI_COMPUTABLE_MODE_P (int_mode))
12259 	op0 = force_to_mode (op0, int_mode,
12260 			     HOST_WIDE_INT_1U
12261 			     << (GET_MODE_PRECISION (int_mode) - 1),
12262 			     0);
12263 
12264       if (COMPARISON_P (op0))
12265 	{
12266 	  /* We can't do anything if OP0 is a condition code value, rather
12267 	     than an actual data value.  */
12268 	  if (const_op != 0
12269 	      || CC0_P (XEXP (op0, 0))
12270 	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12271 	    break;
12272 
12273 	  /* Get the two operands being compared.  */
12274 	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12275 	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12276 	  else
12277 	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12278 
12279 	  /* Check for the cases where we simply want the result of the
12280 	     earlier test or the opposite of that result.  */
12281 	  if (code == NE || code == EQ
12282 	      || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12283 		  && (code == LT || code == GE)))
12284 	    {
12285 	      enum rtx_code new_code;
12286 	      if (code == LT || code == NE)
12287 		new_code = GET_CODE (op0);
12288 	      else
12289 		new_code = reversed_comparison_code (op0, NULL);
12290 
12291 	      if (new_code != UNKNOWN)
12292 		{
12293 		  code = new_code;
12294 		  op0 = tem;
12295 		  op1 = tem1;
12296 		  continue;
12297 		}
12298 	    }
12299 	  break;
12300 	}
12301 
12302       if (raw_mode == VOIDmode)
12303 	break;
12304       scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12305 
12306       /* Now try cases based on the opcode of OP0.  If none of the cases
12307 	 does a "continue", we exit this loop immediately after the
12308 	 switch.  */
12309 
12310       unsigned int mode_width = GET_MODE_PRECISION (mode);
12311       unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12312       switch (GET_CODE (op0))
12313 	{
12314 	case ZERO_EXTRACT:
12315 	  /* If we are extracting a single bit from a variable position in
12316 	     a constant that has only a single bit set and are comparing it
12317 	     with zero, we can convert this into an equality comparison
12318 	     between the position and the location of the single bit.  */
12319 	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12320 	     have already reduced the shift count modulo the word size.  */
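	  /* For instance, assuming !SHIFT_COUNT_TRUNCATED and
	     !BITS_BIG_ENDIAN, (eq (zero_extract (const_int 4) (const_int 1)
	     POS) (const_int 0)) becomes (ne POS (const_int 2)).  */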
12321 	  if (!SHIFT_COUNT_TRUNCATED
12322 	      && CONST_INT_P (XEXP (op0, 0))
12323 	      && XEXP (op0, 1) == const1_rtx
12324 	      && equality_comparison_p && const_op == 0
12325 	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12326 	    {
12327 	      if (BITS_BIG_ENDIAN)
12328 		i = BITS_PER_WORD - 1 - i;
12329 
12330 	      op0 = XEXP (op0, 2);
12331 	      op1 = GEN_INT (i);
12332 	      const_op = i;
12333 
12334 	      /* Result is nonzero iff shift count is equal to I.  */
12335 	      code = reverse_condition (code);
12336 	      continue;
12337 	    }
12338 
12339 	  /* fall through */
12340 
12341 	case SIGN_EXTRACT:
12342 	  tem = expand_compound_operation (op0);
12343 	  if (tem != op0)
12344 	    {
12345 	      op0 = tem;
12346 	      continue;
12347 	    }
12348 	  break;
12349 
12350 	case NOT:
12351 	  /* If testing for equality, we can take the NOT of the constant.  */
12352 	  if (equality_comparison_p
12353 	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12354 	    {
12355 	      op0 = XEXP (op0, 0);
12356 	      op1 = tem;
12357 	      continue;
12358 	    }
12359 
12360 	  /* If just looking at the sign bit, reverse the sense of the
12361 	     comparison.  */
12362 	  if (sign_bit_comparison_p)
12363 	    {
12364 	      op0 = XEXP (op0, 0);
12365 	      code = (code == GE ? LT : GE);
12366 	      continue;
12367 	    }
12368 	  break;
12369 
12370 	case NEG:
12371 	  /* If testing for equality, we can take the NEG of the constant.  */
12372 	  if (equality_comparison_p
12373 	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12374 	    {
12375 	      op0 = XEXP (op0, 0);
12376 	      op1 = tem;
12377 	      continue;
12378 	    }
12379 
12380 	  /* The remaining cases only apply to comparisons with zero.  */
12381 	  if (const_op != 0)
12382 	    break;
12383 
12384 	  /* When X is ABS or is known positive,
12385 	     (neg X) is < 0 if and only if X != 0.  */
12386 
12387 	  if (sign_bit_comparison_p
12388 	      && (GET_CODE (XEXP (op0, 0)) == ABS
12389 		  || (mode_width <= HOST_BITS_PER_WIDE_INT
12390 		      && (nonzero_bits (XEXP (op0, 0), mode)
12391 			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
12392 			 == 0)))
12393 	    {
12394 	      op0 = XEXP (op0, 0);
12395 	      code = (code == LT ? NE : EQ);
12396 	      continue;
12397 	    }
12398 
12399 	  /* If we have NEG of something whose two high-order bits are the
12400 	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
12401 	  if (num_sign_bit_copies (op0, mode) >= 2)
12402 	    {
12403 	      op0 = XEXP (op0, 0);
12404 	      code = swap_condition (code);
12405 	      continue;
12406 	    }
12407 	  break;
12408 
12409 	case ROTATE:
12410 	  /* If we are testing equality and our count is a constant, we
12411 	     can perform the inverse operation on our RHS.  */
12412 	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12413 	      && (tem = simplify_binary_operation (ROTATERT, mode,
12414 						   op1, XEXP (op0, 1))) != 0)
12415 	    {
12416 	      op0 = XEXP (op0, 0);
12417 	      op1 = tem;
12418 	      continue;
12419 	    }
12420 
12421 	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12422 	     a particular bit.  Convert it to an AND of a constant of that
12423 	     bit.  This will be converted into a ZERO_EXTRACT.  */
12424 	  if (const_op == 0 && sign_bit_comparison_p
12425 	      && CONST_INT_P (XEXP (op0, 1))
12426 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12427 	      && UINTVAL (XEXP (op0, 1)) < mode_width)
12428 	    {
12429 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12430 					    (HOST_WIDE_INT_1U
12431 					     << (mode_width - 1
12432 						 - INTVAL (XEXP (op0, 1)))));
12433 	      code = (code == LT ? NE : EQ);
12434 	      continue;
12435 	    }
12436 
12437 	  /* Fall through.  */
12438 
12439 	case ABS:
12440 	  /* ABS is ignorable inside an equality comparison with zero.  */
12441 	  if (const_op == 0 && equality_comparison_p)
12442 	    {
12443 	      op0 = XEXP (op0, 0);
12444 	      continue;
12445 	    }
12446 	  break;
12447 
12448 	case SIGN_EXTEND:
12449 	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12450 	     (compare FOO CONST) if CONST fits in FOO's mode and we
12451 	     are either testing inequality or have an unsigned
12452 	     comparison with ZERO_EXTEND or a signed comparison with
12453 	     SIGN_EXTEND.  But don't do it if we don't have a compare
12454 	     insn of the given mode, since we'd have to revert it
12455 	     later on, and then we wouldn't know whether to sign- or
12456 	     zero-extend.  */
12457 	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12458 	      && ! unsigned_comparison_p
12459 	      && HWI_COMPUTABLE_MODE_P (mode)
12460 	      && trunc_int_for_mode (const_op, mode) == const_op
12461 	      && have_insn_for (COMPARE, mode))
12462 	    {
12463 	      op0 = XEXP (op0, 0);
12464 	      continue;
12465 	    }
12466 	  break;
12467 
12468 	case SUBREG:
12469 	  /* Check for the case where we are comparing A - C1 with C2, that is
12470 
12471 	       (subreg:MODE (plus (A) (-C1))) op (C2)
12472 
12473 	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
12474 	     comparison in the wider mode.  One of the following two conditions
12475 	     must be true in order for this to be valid:
12476 
12477 	       1. The mode extension results in the same bit pattern being added
12478 		  on both sides and the comparison is equality or unsigned.  As
12479 		  C2 has been truncated to fit in MODE, the pattern can only be
12480 		  all 0s or all 1s.
12481 
12482 	       2. The mode extension results in the sign bit being copied on
12483 		  each side.
12484 
12485 	     The difficulty here is that we have predicates for A but not for
12486 	     (A - C1) so we need to check that C1 is within proper bounds so
12487 	     as to perturb A as little as possible.  */
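
	  /* For instance, if A is known to have all bits above QImode clear
	     in SImode, an equality test of the lowpart QImode SUBREG of
	     (plus:SI A (const_int -1)) against (const_int 5) can be lifted to
	     an SImode test of the PLUS itself against (const_int 5).  */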
12488 
12489 	  if (mode_width <= HOST_BITS_PER_WIDE_INT
12490 	      && subreg_lowpart_p (op0)
12491 	      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12492 					 &inner_mode)
12493 	      && GET_MODE_PRECISION (inner_mode) > mode_width
12494 	      && GET_CODE (SUBREG_REG (op0)) == PLUS
12495 	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12496 	    {
12497 	      rtx a = XEXP (SUBREG_REG (op0), 0);
12498 	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12499 
12500 	      if ((c1 > 0
12501 		   && (unsigned HOST_WIDE_INT) c1
12502 		       < HOST_WIDE_INT_1U << (mode_width - 1)
12503 		   && (equality_comparison_p || unsigned_comparison_p)
12504 		   /* (A - C1) zero-extends if it is positive and sign-extends
12505 		      if it is negative, C2 both zero- and sign-extends.  */
12506 		   && (((nonzero_bits (a, inner_mode)
12507 			 & ~GET_MODE_MASK (mode)) == 0
12508 			&& const_op >= 0)
12509 		       /* (A - C1) sign-extends if it is positive and 1-extends
12510 			  if it is negative, C2 both sign- and 1-extends.  */
12511 		       || (num_sign_bit_copies (a, inner_mode)
12512 			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12513 					     - mode_width)
12514 			   && const_op < 0)))
12515 		  || ((unsigned HOST_WIDE_INT) c1
12516 		       < HOST_WIDE_INT_1U << (mode_width - 2)
12517 		      /* (A - C1) always sign-extends, like C2.  */
12518 		      && num_sign_bit_copies (a, inner_mode)
12519 			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12520 					   - (mode_width - 1))))
12521 		{
12522 		  op0 = SUBREG_REG (op0);
12523 		  continue;
12524 		}
12525 	    }
12526 
12527 	  /* If the inner mode is narrower and we are extracting the low part,
12528 	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
12529 	  if (paradoxical_subreg_p (op0))
12530 	    ;
12531 	  else if (subreg_lowpart_p (op0)
12532 		   && GET_MODE_CLASS (mode) == MODE_INT
12533 		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12534 		   && (code == NE || code == EQ)
12535 		   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12536 		   && !paradoxical_subreg_p (op0)
12537 		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12538 		       & ~GET_MODE_MASK (mode)) == 0)
12539 	    {
12540 	      /* Remove outer subregs that don't do anything.  */
12541 	      tem = gen_lowpart (inner_mode, op1);
12542 
12543 	      if ((nonzero_bits (tem, inner_mode)
12544 		   & ~GET_MODE_MASK (mode)) == 0)
12545 		{
12546 		  op0 = SUBREG_REG (op0);
12547 		  op1 = tem;
12548 		  continue;
12549 		}
12550 	      break;
12551 	    }
12552 	  else
12553 	    break;
12554 
12555 	  /* FALLTHROUGH */
12556 
12557 	case ZERO_EXTEND:
12558 	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12559 	      && (unsigned_comparison_p || equality_comparison_p)
12560 	      && HWI_COMPUTABLE_MODE_P (mode)
12561 	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12562 	      && const_op >= 0
12563 	      && have_insn_for (COMPARE, mode))
12564 	    {
12565 	      op0 = XEXP (op0, 0);
12566 	      continue;
12567 	    }
12568 	  break;
12569 
12570 	case PLUS:
12571 	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
12572 	     this for equality comparisons due to pathological cases involving
12573 	     overflows.  */
12574 	  if (equality_comparison_p
12575 	      && (tem = simplify_binary_operation (MINUS, mode,
12576 						   op1, XEXP (op0, 1))) != 0)
12577 	    {
12578 	      op0 = XEXP (op0, 0);
12579 	      op1 = tem;
12580 	      continue;
12581 	    }
12582 
12583 	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
12584 	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12585 	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12586 	    {
12587 	      op0 = XEXP (XEXP (op0, 0), 0);
12588 	      code = (code == LT ? EQ : NE);
12589 	      continue;
12590 	    }
12591 	  break;
12592 
12593 	case MINUS:
12594 	  /* We used to optimize signed comparisons against zero, but that
12595 	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
12596 	     arrive here as equality comparisons, or (GEU, LTU) are
12597 	     optimized away.  No need to special-case them.  */
12598 
12599 	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
12600 	     (eq B (minus A C)), whichever simplifies.  We can only do
12601 	     this for equality comparisons due to pathological cases involving
12602 	     overflows.  */
12603 	  if (equality_comparison_p
12604 	      && (tem = simplify_binary_operation (PLUS, mode,
12605 						   XEXP (op0, 1), op1)) != 0)
12606 	    {
12607 	      op0 = XEXP (op0, 0);
12608 	      op1 = tem;
12609 	      continue;
12610 	    }
12611 
12612 	  if (equality_comparison_p
12613 	      && (tem = simplify_binary_operation (MINUS, mode,
12614 						   XEXP (op0, 0), op1)) != 0)
12615 	    {
12616 	      op0 = XEXP (op0, 1);
12617 	      op1 = tem;
12618 	      continue;
12619 	    }
12620 
12621 	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12622 	     of bits in X minus 1, is one iff X > 0.  */
12623 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12624 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12625 	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12626 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12627 	    {
12628 	      op0 = XEXP (op0, 1);
12629 	      code = (code == GE ? LE : GT);
12630 	      continue;
12631 	    }
12632 	  break;
12633 
12634 	case XOR:
12635 	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
12636 	     if C is zero or B is a constant.  */
12637 	  if (equality_comparison_p
12638 	      && (tem = simplify_binary_operation (XOR, mode,
12639 						   XEXP (op0, 1), op1)) != 0)
12640 	    {
12641 	      op0 = XEXP (op0, 0);
12642 	      op1 = tem;
12643 	      continue;
12644 	    }
12645 	  break;
12646 
12647 
12648 	case IOR:
12649 	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12650 	     iff X <= 0.  */
12651 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12652 	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12653 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12654 	    {
12655 	      op0 = XEXP (op0, 1);
12656 	      code = (code == GE ? GT : LE);
12657 	      continue;
12658 	    }
12659 	  break;
12660 
12661 	case AND:
12662 	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
12663 	     will be converted to a ZERO_EXTRACT later.  */
12664 	  if (const_op == 0 && equality_comparison_p
12665 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
12666 	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12667 	    {
12668 	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12669 				      XEXP (XEXP (op0, 0), 1));
12670 	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12671 	      continue;
12672 	    }
12673 
12674 	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12675 	     zero and X is a comparison and C1 and C2 describe only bits set
12676 	     in STORE_FLAG_VALUE, we can compare with X.  */
12677 	  if (const_op == 0 && equality_comparison_p
12678 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12679 	      && CONST_INT_P (XEXP (op0, 1))
12680 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12681 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12682 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12683 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12684 	    {
12685 	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12686 		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
12687 	      if ((~STORE_FLAG_VALUE & mask) == 0
12688 		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12689 		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12690 			  && COMPARISON_P (tem))))
12691 		{
12692 		  op0 = XEXP (XEXP (op0, 0), 0);
12693 		  continue;
12694 		}
12695 	    }
12696 
12697 	  /* If we are doing an equality comparison of an AND of a bit equal
12698 	     to the sign bit, replace this with a LT or GE comparison of
12699 	     the underlying value.  */
12700 	  if (equality_comparison_p
12701 	      && const_op == 0
12702 	      && CONST_INT_P (XEXP (op0, 1))
12703 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12704 	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12705 		  == HOST_WIDE_INT_1U << (mode_width - 1)))
12706 	    {
12707 	      op0 = XEXP (op0, 0);
12708 	      code = (code == EQ ? GE : LT);
12709 	      continue;
12710 	    }
12711 
12712 	  /* If this AND operation is really a ZERO_EXTEND from a narrower
12713 	     mode, the constant fits within that mode, and this is either an
12714 	     equality or unsigned comparison, try to do this comparison in
12715 	     the narrower mode.
12716 
12717 	     Note that in:
12718 
12719 	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12720 	     -> (ne:DI (reg:SI 4) (const_int 0))
12721 
12722 	     unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12723 	     known to hold a value of the required mode, the
12724 	     transformation is invalid.  */
12725 	  if ((equality_comparison_p || unsigned_comparison_p)
12726 	      && CONST_INT_P (XEXP (op0, 1))
12727 	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12728 				   & GET_MODE_MASK (mode))
12729 				  + 1)) >= 0
12730 	      && const_op >> i == 0
12731 	      && int_mode_for_size (i, 1).exists (&tmode))
12732 	    {
12733 	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12734 	      continue;
12735 	    }
12736 
12737 	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12738 	     fits in both M1 and M2 and the SUBREG is either paradoxical
12739 	     or represents the low part, permute the SUBREG and the AND
12740 	     and try again.  */
12741 	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
12742 	      && CONST_INT_P (XEXP (op0, 1)))
12743 	    {
12744 	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12745 	      /* Require an integral mode, to avoid creating something like
12746 		 (AND:SF ...).  */
12747 	      if ((is_a <scalar_int_mode>
12748 		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12749 		  /* It is unsafe to commute the AND into the SUBREG if the
12750 		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12751 		     not defined.  As originally written the upper bits
12752 		     have a defined value due to the AND operation.
12753 		     However, if we commute the AND inside the SUBREG then
12754 		     they no longer have defined values and the meaning of
12755 		     the code has been changed.
12756 		     Also C1 should not change value in the smaller mode,
12757 		     see PR67028 (a positive C1 can become negative in the
12758 		     smaller mode, so that the AND does no longer mask the
12759 		     upper bits).  */
12760 		  && ((WORD_REGISTER_OPERATIONS
12761 		       && mode_width > GET_MODE_PRECISION (tmode)
12762 		       && mode_width <= BITS_PER_WORD
12763 		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12764 		      || (mode_width <= GET_MODE_PRECISION (tmode)
12765 			  && subreg_lowpart_p (XEXP (op0, 0))))
12766 		  && mode_width <= HOST_BITS_PER_WIDE_INT
12767 		  && HWI_COMPUTABLE_MODE_P (tmode)
12768 		  && (c1 & ~mask) == 0
12769 		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
12770 		  && c1 != mask
12771 		  && c1 != GET_MODE_MASK (tmode))
12772 		{
12773 		  op0 = simplify_gen_binary (AND, tmode,
12774 					     SUBREG_REG (XEXP (op0, 0)),
12775 					     gen_int_mode (c1, tmode));
12776 		  op0 = gen_lowpart (mode, op0);
12777 		  continue;
12778 		}
12779 	    }
12780 
12781 	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
12782 	  if (const_op == 0 && equality_comparison_p
12783 	      && XEXP (op0, 1) == const1_rtx
12784 	      && GET_CODE (XEXP (op0, 0)) == NOT)
12785 	    {
12786 	      op0 = simplify_and_const_int (NULL_RTX, mode,
12787 					    XEXP (XEXP (op0, 0), 0), 1);
12788 	      code = (code == NE ? EQ : NE);
12789 	      continue;
12790 	    }
12791 
12792 	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12793 	     (eq (and (lshiftrt X) 1) 0).
12794 	     Also handle the case where (not X) is expressed using xor.  */
12795 	  if (const_op == 0 && equality_comparison_p
12796 	      && XEXP (op0, 1) == const1_rtx
12797 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12798 	    {
12799 	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
12800 	      rtx shift_count = XEXP (XEXP (op0, 0), 1);
12801 
12802 	      if (GET_CODE (shift_op) == NOT
12803 		  || (GET_CODE (shift_op) == XOR
12804 		      && CONST_INT_P (XEXP (shift_op, 1))
12805 		      && CONST_INT_P (shift_count)
12806 		      && HWI_COMPUTABLE_MODE_P (mode)
12807 		      && (UINTVAL (XEXP (shift_op, 1))
12808 			  == HOST_WIDE_INT_1U
12809 			       << INTVAL (shift_count))))
12810 		{
12811 		  op0
12812 		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12813 		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12814 		  code = (code == NE ? EQ : NE);
12815 		  continue;
12816 		}
12817 	    }
12818 	  break;
12819 
12820 	case ASHIFT:
12821 	  /* If we have (compare (ashift FOO N) (const_int C)) and
12822 	     the high order N bits of FOO (N+1 if an inequality comparison)
12823 	     are known to be zero, we can do this by comparing FOO with C
12824 	     shifted right N bits so long as the low-order N bits of C are
12825 	     zero.  */
12826 	  if (CONST_INT_P (XEXP (op0, 1))
12827 	      && INTVAL (XEXP (op0, 1)) >= 0
12828 	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12829 		  < HOST_BITS_PER_WIDE_INT)
12830 	      && (((unsigned HOST_WIDE_INT) const_op
12831 		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12832 		      - 1)) == 0)
12833 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12834 	      && (nonzero_bits (XEXP (op0, 0), mode)
12835 		  & ~(mask >> (INTVAL (XEXP (op0, 1))
12836 			       + ! equality_comparison_p))) == 0)
12837 	    {
12838 	      /* We must perform a logical shift, not an arithmetic one,
12839 		 as we want the top N bits of C to be zero.  */
12840 	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12841 
12842 	      temp >>= INTVAL (XEXP (op0, 1));
12843 	      op1 = gen_int_mode (temp, mode);
12844 	      op0 = XEXP (op0, 0);
12845 	      continue;
12846 	    }
12847 
12848 	  /* If we are doing a sign bit comparison, it means we are testing
12849 	     a particular bit.  Convert it to the appropriate AND.  */
12850 	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12851 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
12852 	    {
12853 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12854 					    (HOST_WIDE_INT_1U
12855 					     << (mode_width - 1
12856 						 - INTVAL (XEXP (op0, 1)))));
12857 	      code = (code == LT ? NE : EQ);
12858 	      continue;
12859 	    }
12860 
12861 	  /* If this is an equality comparison with zero and we are shifting
12862 	     the low bit to the sign bit, we can convert this to an AND of the
12863 	     low-order bit.  */
12864 	  if (const_op == 0 && equality_comparison_p
12865 	      && CONST_INT_P (XEXP (op0, 1))
12866 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12867 	    {
12868 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12869 	      continue;
12870 	    }
12871 	  break;
12872 
12873 	case ASHIFTRT:
12874 	  /* If this is an equality comparison with zero, we can do this
12875 	     as a logical shift, which might be much simpler.  */
12876 	  if (equality_comparison_p && const_op == 0
12877 	      && CONST_INT_P (XEXP (op0, 1)))
12878 	    {
12879 	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12880 					  XEXP (op0, 0),
12881 					  INTVAL (XEXP (op0, 1)));
12882 	      continue;
12883 	    }
12884 
12885 	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12886 	     do the comparison in a narrower mode.  */
12887 	  if (! unsigned_comparison_p
12888 	      && CONST_INT_P (XEXP (op0, 1))
12889 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
12890 	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12891 	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12892 		  .exists (&tmode))
12893 	      && (((unsigned HOST_WIDE_INT) const_op
12894 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
12895 		  <= GET_MODE_MASK (tmode)))
12896 	    {
12897 	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12898 	      continue;
12899 	    }
12900 
12901 	  /* Likewise if OP0 is a PLUS of a sign extension with a
12902 	     constant, which is usually represented with the PLUS
12903 	     between the shifts.  */
12904 	  if (! unsigned_comparison_p
12905 	      && CONST_INT_P (XEXP (op0, 1))
12906 	      && GET_CODE (XEXP (op0, 0)) == PLUS
12907 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12908 	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12909 	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12910 	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12911 		  .exists (&tmode))
12912 	      && (((unsigned HOST_WIDE_INT) const_op
12913 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
12914 		  <= GET_MODE_MASK (tmode)))
12915 	    {
12916 	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12917 	      rtx add_const = XEXP (XEXP (op0, 0), 1);
12918 	      rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12919 						   add_const, XEXP (op0, 1));
12920 
12921 	      op0 = simplify_gen_binary (PLUS, tmode,
12922 					 gen_lowpart (tmode, inner),
12923 					 new_const);
12924 	      continue;
12925 	    }
12926 
12927 	  /* FALLTHROUGH */
12928 	case LSHIFTRT:
12929 	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12930 	     the low order N bits of FOO are known to be zero, we can do this
12931 	     by comparing FOO with C shifted left N bits so long as no
12932 	     overflow occurs.  Even if the low order N bits of FOO aren't known
12933 	     to be zero, if the comparison is >= or < we can use the same
12934 	     optimization and for > or <= by setting all the low
12935 	     order N bits in the comparison constant.  */
12936 	  if (CONST_INT_P (XEXP (op0, 1))
12937 	      && INTVAL (XEXP (op0, 1)) > 0
12938 	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12939 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12940 	      && (((unsigned HOST_WIDE_INT) const_op
12941 		   + (GET_CODE (op0) != LSHIFTRT
12942 		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12943 			 + 1)
12944 		      : 0))
12945 		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12946 	    {
12947 	      unsigned HOST_WIDE_INT low_bits
12948 		= (nonzero_bits (XEXP (op0, 0), mode)
12949 		   & ((HOST_WIDE_INT_1U
12950 		       << INTVAL (XEXP (op0, 1))) - 1));
12951 	      if (low_bits == 0 || !equality_comparison_p)
12952 		{
12953 		  /* If the shift was logical, then we must make the condition
12954 		     unsigned.  */
12955 		  if (GET_CODE (op0) == LSHIFTRT)
12956 		    code = unsigned_condition (code);
12957 
12958 		  const_op = (unsigned HOST_WIDE_INT) const_op
12959 			      << INTVAL (XEXP (op0, 1));
12960 		  if (low_bits != 0
12961 		      && (code == GT || code == GTU
12962 			  || code == LE || code == LEU))
12963 		    const_op
12964 		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12965 		  op1 = GEN_INT (const_op);
12966 		  op0 = XEXP (op0, 0);
12967 		  continue;
12968 		}
12969 	    }
12970 
12971 	  /* If we are using this shift to extract just the sign bit, we
12972 	     can replace this with an LT or GE comparison.  */
12973 	  if (const_op == 0
12974 	      && (equality_comparison_p || sign_bit_comparison_p)
12975 	      && CONST_INT_P (XEXP (op0, 1))
12976 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12977 	    {
12978 	      op0 = XEXP (op0, 0);
12979 	      code = (code == NE || code == GT ? LT : GE);
12980 	      continue;
12981 	    }
12982 	  break;
12983 
12984 	default:
12985 	  break;
12986 	}
12987 
12988       break;
12989     }
12990 
12991   /* Now make any compound operations involved in this comparison.  Then,
12992      check for an outermost SUBREG on OP0 that is not doing anything or is
12993      paradoxical.  The latter transformation must only be performed when
12994      it is known that the "extra" bits will be the same in op0 and op1 or
12995      that they don't matter.  There are three cases to consider:
12996 
12997      1. SUBREG_REG (op0) is a register.  In this case the bits are don't
12998      care bits and we can assume they have any convenient value.  So
12999      making the transformation is safe.
13000 
13001      2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
13002      In this case the upper bits of op0 are undefined.  We should not make
13003      the simplification in that case as we do not know the contents of
13004      those bits.
13005 
13006      3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
13007      In that case we know those bits are zeros or ones.  We must also be
13008      sure that they are the same as the upper bits of op1.
13009 
13010      We can never remove a SUBREG for a non-equality comparison because
13011      the sign bit is in a different place in the underlying object.  */
13012 
13013   rtx_code op0_mco_code = SET;
13014   if (op1 == const0_rtx)
13015     op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
13016 
13017   op0 = make_compound_operation (op0, op0_mco_code);
13018   op1 = make_compound_operation (op1, SET);
13019 
13020   if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
13021       && is_int_mode (GET_MODE (op0), &mode)
13022       && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
13023       && (code == NE || code == EQ))
13024     {
13025       if (paradoxical_subreg_p (op0))
13026 	{
13027 	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
13028 	     implemented.  */
13029 	  if (REG_P (SUBREG_REG (op0)))
13030 	    {
13031 	      op0 = SUBREG_REG (op0);
13032 	      op1 = gen_lowpart (inner_mode, op1);
13033 	    }
13034 	}
13035       else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
13036 	       && (nonzero_bits (SUBREG_REG (op0), inner_mode)
13037 		   & ~GET_MODE_MASK (mode)) == 0)
13038 	{
13039 	  tem = gen_lowpart (inner_mode, op1);
13040 
13041 	  if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
13042 	    op0 = SUBREG_REG (op0), op1 = tem;
13043 	}
13044     }
13045 
13046   /* We now do the opposite procedure: Some machines don't have compare
13047      insns in all modes.  If OP0's mode is an integer mode smaller than a
13048      word and we can't do a compare in that mode, see if there is a larger
13049      mode for which we can do the compare.  There are a number of cases in
13050      which we can use the wider mode.  */
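
  /* For instance, on a target that only has SImode compare instructions, an
     equality test between QImode values whose nonzero bits all fit in QImode
     can be done by zero-extending both operands and comparing in SImode.  */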
13051 
13052   if (is_int_mode (GET_MODE (op0), &mode)
13053       && GET_MODE_SIZE (mode) < UNITS_PER_WORD
13054       && ! have_insn_for (COMPARE, mode))
13055     FOR_EACH_WIDER_MODE (tmode_iter, mode)
13056       {
13057 	tmode = tmode_iter.require ();
13058 	if (!HWI_COMPUTABLE_MODE_P (tmode))
13059 	  break;
13060 	if (have_insn_for (COMPARE, tmode))
13061 	  {
13062 	    int zero_extended;
13063 
13064 	    /* If this is a test for negative, we can make an explicit
13065 	       test of the sign bit.  Test this first so we can use
13066 	       a paradoxical subreg to extend OP0.  */
13067 
13068 	    if (op1 == const0_rtx && (code == LT || code == GE)
13069 		&& HWI_COMPUTABLE_MODE_P (mode))
13070 	      {
13071 		unsigned HOST_WIDE_INT sign
13072 		  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
13073 		op0 = simplify_gen_binary (AND, tmode,
13074 					   gen_lowpart (tmode, op0),
13075 					   gen_int_mode (sign, tmode));
13076 		code = (code == LT) ? NE : EQ;
13077 		break;
13078 	      }
13079 
13080 	    /* If the only nonzero bits in OP0 and OP1 are those in the
13081 	       narrower mode and this is an equality or unsigned comparison,
13082 	       we can use the wider mode.  Similarly for sign-extended
13083 	       values, in which case it is true for all comparisons.  */
13084 	    zero_extended = ((code == EQ || code == NE
13085 			      || code == GEU || code == GTU
13086 			      || code == LEU || code == LTU)
13087 			     && (nonzero_bits (op0, tmode)
13088 				 & ~GET_MODE_MASK (mode)) == 0
13089 			     && ((CONST_INT_P (op1)
13090 				  || (nonzero_bits (op1, tmode)
13091 				      & ~GET_MODE_MASK (mode)) == 0)));
13092 
13093 	    if (zero_extended
13094 		|| ((num_sign_bit_copies (op0, tmode)
13095 		     > (unsigned int) (GET_MODE_PRECISION (tmode)
13096 				       - GET_MODE_PRECISION (mode)))
13097 		    && (num_sign_bit_copies (op1, tmode)
13098 			> (unsigned int) (GET_MODE_PRECISION (tmode)
13099 					  - GET_MODE_PRECISION (mode)))))
13100 	      {
13101 		/* If OP0 is an AND and we don't have an AND in MODE either,
13102 		   make a new AND in the proper mode.  */
13103 		if (GET_CODE (op0) == AND
13104 		    && !have_insn_for (AND, mode))
13105 		  op0 = simplify_gen_binary (AND, tmode,
13106 					     gen_lowpart (tmode,
13107 							  XEXP (op0, 0)),
13108 					     gen_lowpart (tmode,
13109 							  XEXP (op0, 1)));
13110 		else
13111 		  {
13112 		    if (zero_extended)
13113 		      {
13114 			op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
13115 						  op0, mode);
13116 			op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
13117 						  op1, mode);
13118 		      }
13119 		    else
13120 		      {
13121 			op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
13122 						  op0, mode);
13123 			op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
13124 						  op1, mode);
13125 		      }
13126 		    break;
13127 		  }
13128 	      }
13129 	  }
13130       }
13131 
13132   /* We may have changed the comparison operands.  Re-canonicalize.  */
13133   if (swap_commutative_operands_p (op0, op1))
13134     {
13135       std::swap (op0, op1);
13136       code = swap_condition (code);
13137     }
13138 
13139   /* If this machine only supports a subset of valid comparisons, see if we
13140      can convert an unsupported one into a supported one.  */
13141   target_canonicalize_comparison (&code, &op0, &op1, 0);
13142 
13143   *pop0 = op0;
13144   *pop1 = op1;
13145 
13146   return code;
13147 }
13148 
13149 /* Utility function for record_value_for_reg.  Count number of
13150    rtxs in X.  */
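
/* For instance, (plus X X) with both operands the same rtx counts as
   1 + 2 * count_rtxs (X), so a shared operand is counted once per
   appearance.  */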
13151 static int
13152 count_rtxs (rtx x)
13153 {
13154   enum rtx_code code = GET_CODE (x);
13155   const char *fmt;
13156   int i, j, ret = 1;
13157 
13158   if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
13159       || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
13160     {
13161       rtx x0 = XEXP (x, 0);
13162       rtx x1 = XEXP (x, 1);
13163 
13164       if (x0 == x1)
13165 	return 1 + 2 * count_rtxs (x0);
13166 
13167       if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
13168 	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
13169 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13170 	return 2 + 2 * count_rtxs (x0)
13171 	       + count_rtxs (x0 == XEXP (x1, 0)
13172 			     ? XEXP (x1, 1) : XEXP (x1, 0));
13173 
13174       if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
13175 	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13176 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13177 	return 2 + 2 * count_rtxs (x1)
13178 	       + count_rtxs (x1 == XEXP (x0, 0)
13179 			     ? XEXP (x0, 1) : XEXP (x0, 0));
13180     }
13181 
13182   fmt = GET_RTX_FORMAT (code);
13183   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13184     if (fmt[i] == 'e')
13185       ret += count_rtxs (XEXP (x, i));
13186     else if (fmt[i] == 'E')
13187       for (j = 0; j < XVECLEN (x, i); j++)
13188 	ret += count_rtxs (XVECEXP (x, i, j));
13189 
13190   return ret;
13191 }
13192 
13193 /* Utility function for the following routine.  Called when X is part of a
13194    value being stored into last_set_value.  Sets last_set_table_tick
13195    for each register mentioned.  Similar to mention_regs in cse.c.  */
13196 
13197 static void
13198 update_table_tick (rtx x)
13199 {
13200   enum rtx_code code = GET_CODE (x);
13201   const char *fmt = GET_RTX_FORMAT (code);
13202   int i, j;
13203 
13204   if (code == REG)
13205     {
13206       unsigned int regno = REGNO (x);
13207       unsigned int endregno = END_REGNO (x);
13208       unsigned int r;
13209 
13210       for (r = regno; r < endregno; r++)
13211 	{
13212 	  reg_stat_type *rsp = &reg_stat[r];
13213 	  rsp->last_set_table_tick = label_tick;
13214 	}
13215 
13216       return;
13217     }
13218 
13219   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13220     if (fmt[i] == 'e')
13221       {
13222 	/* Check for identical subexpressions.  If x contains
13223 	   identical subexpressions, we only have to traverse one of
13224 	   them.  */
13225 	if (i == 0 && ARITHMETIC_P (x))
13226 	  {
13227 	    /* Note that at this point x1 has already been
13228 	       processed.  */
13229 	    rtx x0 = XEXP (x, 0);
13230 	    rtx x1 = XEXP (x, 1);
13231 
13232 	    /* If x0 and x1 are identical then there is no need to
13233 	       process x0.  */
13234 	    if (x0 == x1)
13235 	      break;
13236 
13237 	    /* If x0 is identical to a subexpression of x1 then while
13238 	       processing x1, x0 has already been processed.  Thus we
13239 	       are done with x.  */
13240 	    if (ARITHMETIC_P (x1)
13241 		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13242 	      break;
13243 
13244 	    /* If x1 is identical to a subexpression of x0 then we
13245 	       still have to process the rest of x0.  */
13246 	    if (ARITHMETIC_P (x0)
13247 		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13248 	      {
13249 		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13250 		break;
13251 	      }
13252 	  }
13253 
13254 	update_table_tick (XEXP (x, i));
13255       }
13256     else if (fmt[i] == 'E')
13257       for (j = 0; j < XVECLEN (x, i); j++)
13258 	update_table_tick (XVECEXP (x, i, j));
13259 }
13260 
13261 /* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
13262    are saying that the register is clobbered and we no longer know its
13263    value.  If INSN is zero, don't update reg_stat[].last_set; this is
13264    only permitted with VALUE also zero and is used to invalidate the
13265    register.  */
13266 
13267 static void
13268 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13269 {
13270   unsigned int regno = REGNO (reg);
13271   unsigned int endregno = END_REGNO (reg);
13272   unsigned int i;
13273   reg_stat_type *rsp;
13274 
13275   /* If VALUE contains REG and we have a previous value for REG, substitute
13276      the previous value.  */
13277   if (value && insn && reg_overlap_mentioned_p (reg, value))
13278     {
13279       rtx tem;
13280 
13281       /* Set things up so get_last_value is allowed to see anything set up to
13282 	 our insn.  */
13283       subst_low_luid = DF_INSN_LUID (insn);
13284       tem = get_last_value (reg);
13285 
13286       /* If TEM is simply a binary operation with two CLOBBERs as operands,
13287 	 it isn't going to be useful and will take a lot of time to process,
13288 	 so just use the CLOBBER.  */
13289 
13290       if (tem)
13291 	{
13292 	  if (ARITHMETIC_P (tem)
13293 	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
13294 	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13295 	    tem = XEXP (tem, 0);
13296 	  else if (count_occurrences (value, reg, 1) >= 2)
13297 	    {
13298 	      /* If there are two or more occurrences of REG in VALUE,
13299 		 prevent the value from growing too much.  */
13300 	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13301 		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13302 	    }
13303 
13304 	  value = replace_rtx (copy_rtx (value), reg, tem);
13305 	}
13306     }
13307 
13308   /* For each register modified, show we don't know its value, that
13309      we don't know about its bitwise content, that its value has been
13310      updated, and that we don't know the location of the death of the
13311      register.  */
13312   for (i = regno; i < endregno; i++)
13313     {
13314       rsp = &reg_stat[i];
13315 
13316       if (insn)
13317 	rsp->last_set = insn;
13318 
13319       rsp->last_set_value = 0;
13320       rsp->last_set_mode = VOIDmode;
13321       rsp->last_set_nonzero_bits = 0;
13322       rsp->last_set_sign_bit_copies = 0;
13323       rsp->last_death = 0;
13324       rsp->truncated_to_mode = VOIDmode;
13325     }
13326 
13327   /* Mark registers that are being referenced in this value.  */
13328   if (value)
13329     update_table_tick (value);
13330 
13331   /* Now update the status of each register being set.
13332      If someone is using this register in this block, set this register
13333      to invalid since we will get confused between the two lives in this
13334      basic block.  This makes using this register always invalid.  In cse, we
13335      scan the table to invalidate all entries using this register, but this
13336      is too much work for us.  */
13337 
13338   for (i = regno; i < endregno; i++)
13339     {
13340       rsp = &reg_stat[i];
13341       rsp->last_set_label = label_tick;
13342       if (!insn
13343 	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13344 	rsp->last_set_invalid = 1;
13345       else
13346 	rsp->last_set_invalid = 0;
13347     }
13348 
13349   /* The value being assigned might refer to X (like in "x++;").  In that
13350      case, we must replace it with (clobber (const_int 0)) to prevent
13351      infinite loops.  */
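  /* For instance, for "x++;" the RTL is roughly
       (set (reg X) (plus (reg X) (const_int 1)))
     so substituting the last value of X into VALUE over and over would
     never terminate.  */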
13352   rsp = &reg_stat[regno];
13353   if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13354     {
13355       value = copy_rtx (value);
13356       if (!get_last_value_validate (&value, insn, label_tick, 1))
13357 	value = 0;
13358     }
13359 
13360   /* For the main register being modified, update the value, the mode, the
13361      nonzero bits, and the number of sign bit copies.  */
13362 
13363   rsp->last_set_value = value;
13364 
13365   if (value)
13366     {
13367       machine_mode mode = GET_MODE (reg);
13368       subst_low_luid = DF_INSN_LUID (insn);
13369       rsp->last_set_mode = mode;
13370       if (GET_MODE_CLASS (mode) == MODE_INT
13371 	  && HWI_COMPUTABLE_MODE_P (mode))
13372 	mode = nonzero_bits_mode;
13373       rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13374       rsp->last_set_sign_bit_copies
13375 	= num_sign_bit_copies (value, GET_MODE (reg));
13376     }
13377 }
13378 
13379 /* Called via note_stores from record_dead_and_set_regs to handle one
13380    SET or CLOBBER in an insn.  DATA is the instruction in which the
13381    set is occurring.  */
13382 
13383 static void
13384 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13385 {
13386   rtx_insn *record_dead_insn = (rtx_insn *) data;
13387 
13388   if (GET_CODE (dest) == SUBREG)
13389     dest = SUBREG_REG (dest);
13390 
13391   if (!record_dead_insn)
13392     {
13393       if (REG_P (dest))
13394 	record_value_for_reg (dest, NULL, NULL_RTX);
13395       return;
13396     }
13397 
13398   if (REG_P (dest))
13399     {
13400       /* If we are setting the whole register, we know its value.  Otherwise
13401 	 show that we don't know the value.  We can handle a SUBREG if it's
13402 	 the low part, but we must be careful with paradoxical SUBREGs on
13403 	 RISC architectures because we cannot strip e.g. an extension around
13404 	 a load and record the naked load since the RTL middle-end considers
13405 	 that the upper bits are defined according to LOAD_EXTEND_OP.  */
13406       if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13407 	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13408       else if (GET_CODE (setter) == SET
13409 	       && GET_CODE (SET_DEST (setter)) == SUBREG
13410 	       && SUBREG_REG (SET_DEST (setter)) == dest
13411 	       && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13412 			    BITS_PER_WORD)
13413 	       && subreg_lowpart_p (SET_DEST (setter)))
13414 	record_value_for_reg (dest, record_dead_insn,
13415 			      WORD_REGISTER_OPERATIONS
13416 			      && word_register_operation_p (SET_SRC (setter))
13417 			      && paradoxical_subreg_p (SET_DEST (setter))
13418 			      ? SET_SRC (setter)
13419 			      : gen_lowpart (GET_MODE (dest),
13420 					     SET_SRC (setter)));
13421       else if (GET_CODE (setter) == CLOBBER_HIGH)
13422 	{
13423 	  reg_stat_type *rsp = &reg_stat[REGNO (dest)];
13424 	  if (rsp->last_set_value
13425 	      && reg_is_clobbered_by_clobber_high
13426 		   (REGNO (dest), GET_MODE (rsp->last_set_value),
13427 		    XEXP (setter, 0)))
13428 	    record_value_for_reg (dest, NULL, NULL_RTX);
13429 	}
13430       else
13431 	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13432     }
13433   else if (MEM_P (dest)
13434 	   /* Ignore pushes, they clobber nothing.  */
13435 	   && ! push_operand (dest, GET_MODE (dest)))
13436     mem_last_set = DF_INSN_LUID (record_dead_insn);
13437 }
13438 
13439 /* Update the records of when each REG was most recently set or killed
13440    for the things done by INSN.  This is the last thing done in processing
13441    INSN in the combiner loop.
13442 
13443    We update reg_stat[], in particular fields last_set, last_set_value,
13444    last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13445    last_death, and also the similar information mem_last_set (which insn
13446    most recently modified memory) and last_call_luid (which insn was the
13447    most recent subroutine call).  */
13448 
13449 static void
13450 record_dead_and_set_regs (rtx_insn *insn)
13451 {
13452   rtx link;
13453   unsigned int i;
13454 
13455   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13456     {
13457       if (REG_NOTE_KIND (link) == REG_DEAD
13458 	  && REG_P (XEXP (link, 0)))
13459 	{
13460 	  unsigned int regno = REGNO (XEXP (link, 0));
13461 	  unsigned int endregno = END_REGNO (XEXP (link, 0));
13462 
13463 	  for (i = regno; i < endregno; i++)
13464 	    {
13465 	      reg_stat_type *rsp;
13466 
13467 	      rsp = &reg_stat[i];
13468 	      rsp->last_death = insn;
13469 	    }
13470 	}
13471       else if (REG_NOTE_KIND (link) == REG_INC)
13472 	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13473     }
13474 
13475   if (CALL_P (insn))
13476     {
13477       hard_reg_set_iterator hrsi;
13478       EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13479 	{
13480 	  reg_stat_type *rsp;
13481 
13482 	  rsp = &reg_stat[i];
13483 	  rsp->last_set_invalid = 1;
13484 	  rsp->last_set = insn;
13485 	  rsp->last_set_value = 0;
13486 	  rsp->last_set_mode = VOIDmode;
13487 	  rsp->last_set_nonzero_bits = 0;
13488 	  rsp->last_set_sign_bit_copies = 0;
13489 	  rsp->last_death = 0;
13490 	  rsp->truncated_to_mode = VOIDmode;
13491 	}
13492 
13493       last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13494 
13495       /* We can't combine into a call pattern.  Remember, though, that
13496 	 the return value register is set at this LUID.  We could
13497 	 still replace a register with the return value from the
13498 	 wrong subroutine call!  */
13499       note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13500     }
13501   else
13502     note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13503 }
13504 
13505 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13506    register present in the SUBREG, so for each such SUBREG go back and
13507    adjust nonzero and sign bit information of the registers that are
13508    known to have some zero/sign bits set.
13509 
13510    This is needed because when combine blows the SUBREGs away, the
13511    information on zero/sign bits is lost and further combines can be
13512    missed because of that.  */
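/* For example, given a promoted (subreg:SI (reg:DI N) 0) on a 64-bit
   target, an unsigned promotion tells us that the upper 32 bits of
   register N are zero, so the recorded nonzero bits of N can be
   narrowed to the SImode mask.  */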
13513 
13514 static void
13515 record_promoted_value (rtx_insn *insn, rtx subreg)
13516 {
13517   struct insn_link *links;
13518   rtx set;
13519   unsigned int regno = REGNO (SUBREG_REG (subreg));
13520   machine_mode mode = GET_MODE (subreg);
13521 
13522   if (!HWI_COMPUTABLE_MODE_P (mode))
13523     return;
13524 
13525   for (links = LOG_LINKS (insn); links;)
13526     {
13527       reg_stat_type *rsp;
13528 
13529       insn = links->insn;
13530       set = single_set (insn);
13531 
13532       if (! set || !REG_P (SET_DEST (set))
13533 	  || REGNO (SET_DEST (set)) != regno
13534 	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13535 	{
13536 	  links = links->next;
13537 	  continue;
13538 	}
13539 
13540       rsp = &reg_stat[regno];
13541       if (rsp->last_set == insn)
13542 	{
13543 	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13544 	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13545 	}
13546 
13547       if (REG_P (SET_SRC (set)))
13548 	{
13549 	  regno = REGNO (SET_SRC (set));
13550 	  links = LOG_LINKS (insn);
13551 	}
13552       else
13553 	break;
13554     }
13555 }
13556 
13557 /* Check if X, a register, is known to contain a value already
13558    truncated to MODE.  In this case we can use a subreg to refer to
13559    the truncated value even though in the generic case we would need
13560    an explicit truncation.  */
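/* For example, on a target where truncating DImode to SImode is not a
   no-op (64-bit MIPS is the classic case), knowing that X already holds
   a value truncated to SImode lets us use a plain SUBREG instead of an
   explicit TRUNCATE.  */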
13561 
13562 static bool
13563 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13564 {
13565   reg_stat_type *rsp = &reg_stat[REGNO (x)];
13566   machine_mode truncated = rsp->truncated_to_mode;
13567 
13568   if (truncated == 0
13569       || rsp->truncation_label < label_tick_ebb_start)
13570     return false;
13571   if (!partial_subreg_p (mode, truncated))
13572     return true;
13573   if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13574     return true;
13575   return false;
13576 }
13577 
13578 /* If X is a hard reg or a subreg record the mode that the register is
13579    accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13580    able to turn a truncate into a subreg using this information.  Return true
13581    if traversing X is complete.  */
13582 
13583 static bool
13584 record_truncated_value (rtx x)
13585 {
13586   machine_mode truncated_mode;
13587   reg_stat_type *rsp;
13588 
13589   if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13590     {
13591       machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13592       truncated_mode = GET_MODE (x);
13593 
13594       if (!partial_subreg_p (truncated_mode, original_mode))
13595 	return true;
13596 
13597       truncated_mode = GET_MODE (x);
13598       if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13599 	return true;
13600 
13601       x = SUBREG_REG (x);
13602     }
13603   /* ??? For hard-regs we now record everything.  We might be able to
13604      optimize this using last_set_mode.  */
13605   else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13606     truncated_mode = GET_MODE (x);
13607   else
13608     return false;
13609 
13610   rsp = &reg_stat[REGNO (x)];
13611   if (rsp->truncated_to_mode == 0
13612       || rsp->truncation_label < label_tick_ebb_start
13613       || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13614     {
13615       rsp->truncated_to_mode = truncated_mode;
13616       rsp->truncation_label = label_tick;
13617     }
13618 
13619   return true;
13620 }
13621 
13622 /* Callback for note_uses.  Find hardregs and subregs of pseudos and
13623 	   the modes they are used in.  This can help turn TRUNCATEs into
13624    SUBREGs.  */
13625 
13626 static void
13627 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13628 {
13629   subrtx_var_iterator::array_type array;
13630   FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13631     if (record_truncated_value (*iter))
13632       iter.skip_subrtxes ();
13633 }
13634 
13635 /* Scan X for promoted SUBREGs.  For each one found,
13636    note what it implies to the registers used in it.  */
13637 
13638 static void
13639 check_promoted_subreg (rtx_insn *insn, rtx x)
13640 {
13641   if (GET_CODE (x) == SUBREG
13642       && SUBREG_PROMOTED_VAR_P (x)
13643       && REG_P (SUBREG_REG (x)))
13644     record_promoted_value (insn, x);
13645   else
13646     {
13647       const char *format = GET_RTX_FORMAT (GET_CODE (x));
13648       int i, j;
13649 
13650       for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13651 	switch (format[i])
13652 	  {
13653 	  case 'e':
13654 	    check_promoted_subreg (insn, XEXP (x, i));
13655 	    break;
13656 	  case 'V':
13657 	  case 'E':
13658 	    if (XVEC (x, i) != 0)
13659 	      for (j = 0; j < XVECLEN (x, i); j++)
13660 		check_promoted_subreg (insn, XVECEXP (x, i, j));
13661 	    break;
13662 	  }
13663     }
13664 }
13665 
13666 /* Verify that all the registers and memory references mentioned in *LOC are
13667    still valid.  *LOC was part of a value set in INSN when label_tick was
13668    equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
13669    the invalid references with (clobber (const_int 0)) and return 1.  This
13670    replacement is useful because we often can get useful information about
13671    the form of a value (e.g., if it was produced by a shift that always
13672    produces -1 or 0) even though we don't know exactly what registers it
13673    was produced from.  */
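/* For instance, even when the shifted operand has been invalidated, a
   value such as
     (ashiftrt:SI (clobber:SI (const_int 0)) (const_int 31))
   still tells us that the result is either 0 or -1.  */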
13674 
13675 static int
13676 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13677 {
13678   rtx x = *loc;
13679   const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13680   int len = GET_RTX_LENGTH (GET_CODE (x));
13681   int i, j;
13682 
13683   if (REG_P (x))
13684     {
13685       unsigned int regno = REGNO (x);
13686       unsigned int endregno = END_REGNO (x);
13687       unsigned int j;
13688 
13689       for (j = regno; j < endregno; j++)
13690 	{
13691 	  reg_stat_type *rsp = &reg_stat[j];
13692 	  if (rsp->last_set_invalid
13693 	      /* If this is a pseudo-register that was only set once and not
13694 		 live at the beginning of the function, it is always valid.  */
13695 	      || (! (regno >= FIRST_PSEUDO_REGISTER
13696 		     && regno < reg_n_sets_max
13697 		     && REG_N_SETS (regno) == 1
13698 		     && (!REGNO_REG_SET_P
13699 			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13700 			  regno)))
13701 		  && rsp->last_set_label > tick))
13702 	  {
13703 	    if (replace)
13704 	      *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13705 	    return replace;
13706 	  }
13707 	}
13708 
13709       return 1;
13710     }
13711   /* If this is a memory reference, make sure that there were no stores after
13712      it that might have clobbered the value.  We don't have alias info, so we
13713      assume any store invalidates it.  Moreover, we only have local UIDs, so
13714      we also assume that there were stores in the intervening basic blocks.  */
13715   else if (MEM_P (x) && !MEM_READONLY_P (x)
13716 	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13717     {
13718       if (replace)
13719 	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13720       return replace;
13721     }
13722 
13723   for (i = 0; i < len; i++)
13724     {
13725       if (fmt[i] == 'e')
13726 	{
13727 	  /* Check for identical subexpressions.  If x contains
13728 	     identical subexpression we only have to traverse one of
13729 	     them.  */
13730 	  if (i == 1 && ARITHMETIC_P (x))
13731 	    {
13732 	      /* Note that at this point x0 has already been checked
13733 		 and found valid.  */
13734 	      rtx x0 = XEXP (x, 0);
13735 	      rtx x1 = XEXP (x, 1);
13736 
13737 	      /* If x0 and x1 are identical then x is also valid.  */
13738 	      if (x0 == x1)
13739 		return 1;
13740 
13741 	      /* If x1 is identical to a subexpression of x0 then
13742 		 while checking x0, x1 has already been checked.  Thus
13743 		 it is valid and so is x.  */
13744 	      if (ARITHMETIC_P (x0)
13745 		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13746 		return 1;
13747 
13748 	      /* If x0 is identical to a subexpression of x1 then x is
13749 		 valid iff the rest of x1 is valid.  */
13750 	      if (ARITHMETIC_P (x1)
13751 		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13752 		return
13753 		  get_last_value_validate (&XEXP (x1,
13754 						  x0 == XEXP (x1, 0) ? 1 : 0),
13755 					   insn, tick, replace);
13756 	    }
13757 
13758 	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
13759 				       replace) == 0)
13760 	    return 0;
13761 	}
13762       else if (fmt[i] == 'E')
13763 	for (j = 0; j < XVECLEN (x, i); j++)
13764 	  if (get_last_value_validate (&XVECEXP (x, i, j),
13765 				       insn, tick, replace) == 0)
13766 	    return 0;
13767     }
13768 
13769   /* If we haven't found a reason for it to be invalid, it is valid.  */
13770   return 1;
13771 }
13772 
13773 /* Get the last value assigned to X, if known.  Some registers
13774    in the value may be replaced with (clobber (const_int 0)) if their value
13775    is no longer known reliably.  */
13776 
13777 static rtx
13778 get_last_value (const_rtx x)
13779 {
13780   unsigned int regno;
13781   rtx value;
13782   reg_stat_type *rsp;
13783 
13784   /* If this is a non-paradoxical SUBREG, get the value of its operand and
13785      then convert it to the desired mode.  If this is a paradoxical SUBREG,
13786      we cannot predict what values the "extra" bits might have.  */
13787   if (GET_CODE (x) == SUBREG
13788       && subreg_lowpart_p (x)
13789       && !paradoxical_subreg_p (x)
13790       && (value = get_last_value (SUBREG_REG (x))) != 0)
13791     return gen_lowpart (GET_MODE (x), value);
13792 
13793   if (!REG_P (x))
13794     return 0;
13795 
13796   regno = REGNO (x);
13797   rsp = &reg_stat[regno];
13798   value = rsp->last_set_value;
13799 
13800   /* If we don't have a value, or if it isn't for this basic block and
13801      it's either a hard register, set more than once, or it's live
13802      at the beginning of the function, return 0.
13803 
13804      Because if it's not live at the beginning of the function then the reg
13805      is always set before being used (is never used without being set).
13806      And, if it's set only once, and it's always set before use, then all
13807      uses must have the same last value, even if it's not from this basic
13808      block.  */
13809 
13810   if (value == 0
13811       || (rsp->last_set_label < label_tick_ebb_start
13812 	  && (regno < FIRST_PSEUDO_REGISTER
13813 	      || regno >= reg_n_sets_max
13814 	      || REG_N_SETS (regno) != 1
13815 	      || REGNO_REG_SET_P
13816 		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13817     return 0;
13818 
13819   /* If the value was set in a later insn than the ones we are processing,
13820      we can't use it even if the register was only set once.  */
13821   if (rsp->last_set_label == label_tick
13822       && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13823     return 0;
13824 
13825   /* If fewer bits were set than what we are asked for now, we cannot use
13826      the value.  */
13827   if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
13828 		GET_MODE_PRECISION (GET_MODE (x))))
13829     return 0;
13830 
13831   /* If the value has all its registers valid, return it.  */
13832   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13833     return value;
13834 
13835   /* Otherwise, make a copy and replace any invalid register with
13836      (clobber (const_int 0)).  If that fails for some reason, return 0.  */
13837 
13838   value = copy_rtx (value);
13839   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13840     return value;
13841 
13842   return 0;
13843 }
13844 
13845 /* Define the variables used for communication between the following
13846    routines.  */
13847 
13848 static unsigned int reg_dead_regno, reg_dead_endregno;
13849 static int reg_dead_flag;
13850 rtx reg_dead_reg;
13851 
13852 /* Function called via note_stores from reg_dead_at_p.
13853 
13854    If DEST is within [reg_dead_regno, reg_dead_endregno), set
13855    reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
13856 
13857 static void
13858 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13859 {
13860   unsigned int regno, endregno;
13861 
13862   if (!REG_P (dest))
13863     return;
13864 
13865   if (GET_CODE (x) == CLOBBER_HIGH
13866       && !reg_is_clobbered_by_clobber_high (reg_dead_reg, XEXP (x, 0)))
13867     return;
13868 
13869   regno = REGNO (dest);
13870   endregno = END_REGNO (dest);
13871   if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13872     reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13873 }
13874 
13875 /* Return nonzero if REG is known to be dead at INSN.
13876 
13877    We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
13878    referencing REG, it is dead.  If we hit a SET referencing REG, it is
13879    live.  Otherwise, see if it is live or dead at the start of the basic
13880    block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
13881    must be assumed to be always live.  */
13882 
13883 static int
13884 reg_dead_at_p (rtx reg, rtx_insn *insn)
13885 {
13886   basic_block block;
13887   unsigned int i;
13888 
13889   /* Set variables for reg_dead_at_p_1.  */
13890   reg_dead_regno = REGNO (reg);
13891   reg_dead_endregno = END_REGNO (reg);
13892   reg_dead_reg = reg;
13893 
13894   reg_dead_flag = 0;
13895 
13896   /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
13897      we allow the machine description to decide whether use-and-clobber
13898      patterns are OK.  */
13899   if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13900     {
13901       for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13902 	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13903 	  return 0;
13904     }
13905 
13906   /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13907      beginning of basic block.  */
13908   block = BLOCK_FOR_INSN (insn);
13909   for (;;)
13910     {
13911       if (INSN_P (insn))
13912         {
13913 	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13914 	    return 1;
13915 
13916 	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13917 	  if (reg_dead_flag)
13918 	    return reg_dead_flag == 1 ? 1 : 0;
13919 
13920 	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13921 	    return 1;
13922         }
13923 
13924       if (insn == BB_HEAD (block))
13925 	break;
13926 
13927       insn = PREV_INSN (insn);
13928     }
13929 
13930   /* Look at live-in sets for the basic block that we were in.  */
13931   for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13932     if (REGNO_REG_SET_P (df_get_live_in (block), i))
13933       return 0;
13934 
13935   return 1;
13936 }
13937 
13938 /* Note hard registers in X that are used.  */
13939 
13940 static void
13941 mark_used_regs_combine (rtx x)
13942 {
13943   RTX_CODE code = GET_CODE (x);
13944   unsigned int regno;
13945   int i;
13946 
13947   switch (code)
13948     {
13949     case LABEL_REF:
13950     case SYMBOL_REF:
13951     case CONST:
13952     CASE_CONST_ANY:
13953     case PC:
13954     case ADDR_VEC:
13955     case ADDR_DIFF_VEC:
13956     case ASM_INPUT:
13957     /* CC0 must die in the insn after it is set, so we don't need to take
13958        special note of it here.  */
13959     case CC0:
13960       return;
13961 
13962     case CLOBBER:
13963       /* If we are clobbering a MEM, mark any hard registers inside the
13964 	 address as used.  */
13965       if (MEM_P (XEXP (x, 0)))
13966 	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13967       return;
13968 
13969     case REG:
13970       regno = REGNO (x);
13971       /* A hard reg in a wide mode may really be multiple registers.
13972 	 If so, mark all of them just like the first.  */
13973       if (regno < FIRST_PSEUDO_REGISTER)
13974 	{
13975 	  /* None of this applies to the stack, frame or arg pointers.  */
13976 	  if (regno == STACK_POINTER_REGNUM
13977 	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13978 		  && regno == HARD_FRAME_POINTER_REGNUM)
13979 	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13980 		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13981 	      || regno == FRAME_POINTER_REGNUM)
13982 	    return;
13983 
13984 	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13985 	}
13986       return;
13987 
13988     case SET:
13989       {
13990 	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13991 	   the address.  */
13992 	rtx testreg = SET_DEST (x);
13993 
13994 	while (GET_CODE (testreg) == SUBREG
13995 	       || GET_CODE (testreg) == ZERO_EXTRACT
13996 	       || GET_CODE (testreg) == STRICT_LOW_PART)
13997 	  testreg = XEXP (testreg, 0);
13998 
13999 	if (MEM_P (testreg))
14000 	  mark_used_regs_combine (XEXP (testreg, 0));
14001 
14002 	mark_used_regs_combine (SET_SRC (x));
14003       }
14004       return;
14005 
14006     default:
14007       break;
14008     }
14009 
14010   /* Recursively scan the operands of this expression.  */
14011 
14012   {
14013     const char *fmt = GET_RTX_FORMAT (code);
14014 
14015     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
14016       {
14017 	if (fmt[i] == 'e')
14018 	  mark_used_regs_combine (XEXP (x, i));
14019 	else if (fmt[i] == 'E')
14020 	  {
14021 	    int j;
14022 
14023 	    for (j = 0; j < XVECLEN (x, i); j++)
14024 	      mark_used_regs_combine (XVECEXP (x, i, j));
14025 	  }
14026       }
14027   }
14028 }
14029 
14030 /* Remove register number REGNO from the dead registers list of INSN.
14031 
14032    Return the note used to record the death, if there was one.  */
14033 
14034 rtx
14035 remove_death (unsigned int regno, rtx_insn *insn)
14036 {
14037   rtx note = find_regno_note (insn, REG_DEAD, regno);
14038 
14039   if (note)
14040     remove_note (insn, note);
14041 
14042   return note;
14043 }
14044 
14045 /* For each register (hardware or pseudo) used within expression X, if its
14046    death is in an instruction with luid between FROM_LUID (inclusive) and
14047    TO_INSN (exclusive), put a REG_DEAD note for that register in the
14048    list headed by PNOTES.
14049 
14050    That said, don't move registers killed by maybe_kill_insn.
14051 
14052    This is done when X is being merged by combination into TO_INSN.  These
14053    notes will then be distributed as needed.  */
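/* For example, if I2 "r101 = r100 + 1" is combined into I3
   "r102 = r101 * 2" and r100 formerly died in I2, the REG_DEAD note for
   r100 must now be placed on I3, where its last use ends up.  */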
14054 
14055 static void
14056 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
14057 	     rtx *pnotes)
14058 {
14059   const char *fmt;
14060   int len, i;
14061   enum rtx_code code = GET_CODE (x);
14062 
14063   if (code == REG)
14064     {
14065       unsigned int regno = REGNO (x);
14066       rtx_insn *where_dead = reg_stat[regno].last_death;
14067 
14068       /* If we do not know where the register died, it may still die between
14069 	 FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
14070       if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
14071 	{
14072 	  rtx_insn *insn = prev_real_nondebug_insn (to_insn);
14073 	  while (insn
14074 		 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
14075 		 && DF_INSN_LUID (insn) >= from_luid)
14076 	    {
14077 	      if (dead_or_set_regno_p (insn, regno))
14078 		{
14079 		  if (find_regno_note (insn, REG_DEAD, regno))
14080 		    where_dead = insn;
14081 		  break;
14082 		}
14083 
14084 	      insn = prev_real_nondebug_insn (insn);
14085 	    }
14086 	}
14087 
14088       /* Don't move the register if it gets killed in between from and to.  */
14089       if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
14090 	  && ! reg_referenced_p (x, maybe_kill_insn))
14091 	return;
14092 
14093       if (where_dead
14094 	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
14095 	  && DF_INSN_LUID (where_dead) >= from_luid
14096 	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
14097 	{
14098 	  rtx note = remove_death (regno, where_dead);
14099 
14100 	  /* It is possible for the call above to return 0.  This can occur
14101 	     when last_death points to I2 or I1 that we combined with.
14102 	     In that case make a new note.
14103 
14104 	     We must also check for the case where X is a hard register
14105 	     and NOTE is a death note for a range of hard registers
14106 	     including X.  In that case, we must put REG_DEAD notes for
14107 	     the remaining registers in place of NOTE.  */
14108 
14109 	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
14110 	      && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
14111 	    {
14112 	      unsigned int deadregno = REGNO (XEXP (note, 0));
14113 	      unsigned int deadend = END_REGNO (XEXP (note, 0));
14114 	      unsigned int ourend = END_REGNO (x);
14115 	      unsigned int i;
14116 
14117 	      for (i = deadregno; i < deadend; i++)
14118 		if (i < regno || i >= ourend)
14119 		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
14120 	    }
14121 
14122 	  /* If we didn't find any note, or if we found a REG_DEAD note that
14123 	     covers only part of the given reg, and we have a multi-reg hard
14124 	     register, then to be safe we must check for REG_DEAD notes
14125 	     for each register other than the first.  They could have
14126 	     their own REG_DEAD notes lying around.  */
14127 	  else if ((note == 0
14128 		    || (note != 0
14129 			&& partial_subreg_p (GET_MODE (XEXP (note, 0)),
14130 					     GET_MODE (x))))
14131 		   && regno < FIRST_PSEUDO_REGISTER
14132 		   && REG_NREGS (x) > 1)
14133 	    {
14134 	      unsigned int ourend = END_REGNO (x);
14135 	      unsigned int i, offset;
14136 	      rtx oldnotes = 0;
14137 
14138 	      if (note)
14139 		offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
14140 	      else
14141 		offset = 1;
14142 
14143 	      for (i = regno + offset; i < ourend; i++)
14144 		move_deaths (regno_reg_rtx[i],
14145 			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
14146 	    }
14147 
14148 	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
14149 	    {
14150 	      XEXP (note, 1) = *pnotes;
14151 	      *pnotes = note;
14152 	    }
14153 	  else
14154 	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
14155 	}
14156 
14157       return;
14158     }
14159 
14160   else if (GET_CODE (x) == SET)
14161     {
14162       rtx dest = SET_DEST (x);
14163 
14164       move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
14165 
14166       /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
14167 	 that accesses one word of a multi-word item, some
14168 	 piece of every register in the expression is used by
14169 	 this insn, so remove any old death.  */
14170       /* ??? So why do we test for equality of the sizes?  */
14171 
14172       if (GET_CODE (dest) == ZERO_EXTRACT
14173 	  || GET_CODE (dest) == STRICT_LOW_PART
14174 	  || (GET_CODE (dest) == SUBREG
14175 	      && !read_modify_subreg_p (dest)))
14176 	{
14177 	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14178 	  return;
14179 	}
14180 
14181       /* If this is some other SUBREG, we know it replaces the entire
14182 	 value, so use that as the destination.  */
14183       if (GET_CODE (dest) == SUBREG)
14184 	dest = SUBREG_REG (dest);
14185 
14186       /* If this is a MEM, adjust deaths of anything used in the address.
14187 	 For a REG (the only other possibility), the entire value is
14188 	 being replaced so the old value is not used in this insn.  */
14189 
14190       if (MEM_P (dest))
14191 	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14192 		     to_insn, pnotes);
14193       return;
14194     }
14195 
14196   else if (GET_CODE (x) == CLOBBER)
14197     return;
14198 
14199   len = GET_RTX_LENGTH (code);
14200   fmt = GET_RTX_FORMAT (code);
14201 
14202   for (i = 0; i < len; i++)
14203     {
14204       if (fmt[i] == 'E')
14205 	{
14206 	  int j;
14207 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14208 	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14209 			 to_insn, pnotes);
14210 	}
14211       else if (fmt[i] == 'e')
14212 	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14213     }
14214 }
14215 
14216 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14217    pattern of an insn.  X must be a REG.  */
14218 
14219 static int
14220 reg_bitfield_target_p (rtx x, rtx body)
14221 {
14222   int i;
14223 
14224   if (GET_CODE (body) == SET)
14225     {
14226       rtx dest = SET_DEST (body);
14227       rtx target;
14228       unsigned int regno, tregno, endregno, endtregno;
14229 
14230       if (GET_CODE (dest) == ZERO_EXTRACT)
14231 	target = XEXP (dest, 0);
14232       else if (GET_CODE (dest) == STRICT_LOW_PART)
14233 	target = SUBREG_REG (XEXP (dest, 0));
14234       else
14235 	return 0;
14236 
14237       if (GET_CODE (target) == SUBREG)
14238 	target = SUBREG_REG (target);
14239 
14240       if (!REG_P (target))
14241 	return 0;
14242 
14243       tregno = REGNO (target), regno = REGNO (x);
14244       if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14245 	return target == x;
14246 
14247       endtregno = end_hard_regno (GET_MODE (target), tregno);
14248       endregno = end_hard_regno (GET_MODE (x), regno);
14249 
14250       return endregno > tregno && regno < endtregno;
14251     }
14252 
14253   else if (GET_CODE (body) == PARALLEL)
14254     for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14255       if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14256 	return 1;
14257 
14258   return 0;
14259 }
14260 
14261 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14262    as appropriate.  I3 and I2 are the insns resulting from the combination
14263    insns including FROM (I2 may be zero).
14264 
14265    ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14266    not need REG_DEAD notes because they are being substituted for.  This
14267    saves searching in the most common cases.
14268 
14269    Each note in the list is either ignored or placed on some insns, depending
14270    on the type of note.  */
14271 
14272 static void
14273 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14274 		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
14275 {
14276   rtx note, next_note;
14277   rtx tem_note;
14278   rtx_insn *tem_insn;
14279 
14280   for (note = notes; note; note = next_note)
14281     {
14282       rtx_insn *place = 0, *place2 = 0;
14283 
14284       next_note = XEXP (note, 1);
14285       switch (REG_NOTE_KIND (note))
14286 	{
14287 	case REG_BR_PROB:
14288 	case REG_BR_PRED:
14289 	  /* Doesn't matter much where we put this, as long as it's somewhere.
14290 	     It is preferable to keep these notes on branches, which is most
14291 	     likely to be i3.  */
14292 	  place = i3;
14293 	  break;
14294 
14295 	case REG_NON_LOCAL_GOTO:
14296 	  if (JUMP_P (i3))
14297 	    place = i3;
14298 	  else
14299 	    {
14300 	      gcc_assert (i2 && JUMP_P (i2));
14301 	      place = i2;
14302 	    }
14303 	  break;
14304 
14305 	case REG_EH_REGION:
14306 	  /* These notes must remain with the call or trapping instruction.  */
14307 	  if (CALL_P (i3))
14308 	    place = i3;
14309 	  else if (i2 && CALL_P (i2))
14310 	    place = i2;
14311 	  else
14312 	    {
14313 	      gcc_assert (cfun->can_throw_non_call_exceptions);
14314 	      if (may_trap_p (i3))
14315 		place = i3;
14316 	      else if (i2 && may_trap_p (i2))
14317 		place = i2;
14318 	      /* ??? Otherwise assume we've combined things such that we
14319 		 can now prove that the instructions can't trap.  Drop the
14320 		 note in this case.  */
14321 	    }
14322 	  break;
14323 
14324 	case REG_ARGS_SIZE:
14325 	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
14326 	     entire adjustment.  Assert i3 contains at least some adjust.  */
14327 	  if (!noop_move_p (i3))
14328 	    {
14329 	      poly_int64 old_size, args_size = get_args_size (note);
14330 	      /* fixup_args_size_notes looks at REG_NORETURN note,
14331 		 so ensure the note is placed there first.  */
14332 	      if (CALL_P (i3))
14333 		{
14334 		  rtx *np;
14335 		  for (np = &next_note; *np; np = &XEXP (*np, 1))
14336 		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
14337 		      {
14338 			rtx n = *np;
14339 			*np = XEXP (n, 1);
14340 			XEXP (n, 1) = REG_NOTES (i3);
14341 			REG_NOTES (i3) = n;
14342 			break;
14343 		      }
14344 		}
14345 	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14346 	      /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
14347 		 note to all noreturn calls; allow that here.  */
14348 	      gcc_assert (maybe_ne (old_size, args_size)
14349 			  || (CALL_P (i3)
14350 			      && !ACCUMULATE_OUTGOING_ARGS
14351 			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14352 	    }
14353 	  break;
14354 
14355 	case REG_NORETURN:
14356 	case REG_SETJMP:
14357 	case REG_TM:
14358 	case REG_CALL_DECL:
14359 	case REG_CALL_NOCF_CHECK:
14360 	  /* These notes must remain with the call.  It should not be
14361 	     possible for both I2 and I3 to be a call.  */
14362 	  if (CALL_P (i3))
14363 	    place = i3;
14364 	  else
14365 	    {
14366 	      gcc_assert (i2 && CALL_P (i2));
14367 	      place = i2;
14368 	    }
14369 	  break;
14370 
14371 	case REG_UNUSED:
14372 	  /* Any clobbers for i3 may still exist, and so we must process
14373 	     REG_UNUSED notes from that insn.
14374 
14375 	     Any clobbers from i2 or i1 can only exist if they were added by
14376 	     recog_for_combine.  In that case, recog_for_combine created the
14377 	     necessary REG_UNUSED notes.  Trying to keep any original
14378 	     REG_UNUSED notes from these insns can cause incorrect output
14379 	     if it is for the same register as the original i3 dest.
14380 	     In that case, we will notice that the register is set in i3,
14381 	     and then add a REG_UNUSED note for the destination of i3, which
14382 	     is wrong.  However, it is possible to have REG_UNUSED notes from
14383 	     i2 or i1 for registers which were both used and clobbered, so
14384 	     we keep notes from i2 or i1 if they will turn into REG_DEAD
14385 	     notes.  */
14386 
14387 	  /* If this register is set or clobbered in I3, put the note there
14388 	     unless there is one already.  */
14389 	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14390 	    {
14391 	      if (from_insn != i3)
14392 		break;
14393 
14394 	      if (! (REG_P (XEXP (note, 0))
14395 		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14396 		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14397 		place = i3;
14398 	    }
14399 	  /* Otherwise, if this register is used by I3, then this register
14400 	     now dies here, so we must put a REG_DEAD note here unless there
14401 	     is one already.  */
14402 	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14403 		   && ! (REG_P (XEXP (note, 0))
14404 			 ? find_regno_note (i3, REG_DEAD,
14405 					    REGNO (XEXP (note, 0)))
14406 			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14407 	    {
14408 	      PUT_REG_NOTE_KIND (note, REG_DEAD);
14409 	      place = i3;
14410 	    }
14411 
14412 	  /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
14413 	     but we can't tell which at this point.  We must reset any
14414 	     expectations we had about the value that was previously
14415 	     stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
14416 	     and, if appropriate, restore its previous value, but we
14417 	     don't have enough information for that at this point.  */
14418 	  else
14419 	    {
14420 	      record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14421 
14422 	      /* Otherwise, if this register is now referenced in i2
14423 		 then the register used to be modified in one of the
14424 		 original insns.  If it was i3 (say, in an unused
14425 		 parallel), it's now completely gone, so the note can
14426 		 be discarded.  But if it was modified in i2, i1 or i0
14427 		 and we still reference it in i2, then we're
14428 		 referencing the previous value, and since the
14429 		 register was modified and REG_UNUSED, we know that
14430 		 the previous value is now dead.  So, if we only
14431 		 reference the register in i2, we change the note to
14432 		 REG_DEAD, to reflect the previous value.  However, if
14433 		 we're also setting or clobbering the register as
14434 		 scratch, we know (because the register was not
14435 		 referenced in i3) that it's unused, just as it was
14436 		 unused before, and we place the note in i2.  */
14437 	      if (from_insn != i3 && i2 && INSN_P (i2)
14438 		  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14439 		{
14440 		  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14441 		    PUT_REG_NOTE_KIND (note, REG_DEAD);
14442 		  if (! (REG_P (XEXP (note, 0))
14443 			 ? find_regno_note (i2, REG_NOTE_KIND (note),
14444 					    REGNO (XEXP (note, 0)))
14445 			 : find_reg_note (i2, REG_NOTE_KIND (note),
14446 					  XEXP (note, 0))))
14447 		    place = i2;
14448 		}
14449 	    }
14450 
14451 	  break;
14452 
14453 	case REG_EQUAL:
14454 	case REG_EQUIV:
14455 	case REG_NOALIAS:
14456 	  /* These notes say something about results of an insn.  We can
14457 	     only support them if they used to be on I3 in which case they
14458 	     remain on I3.  Otherwise they are ignored.
14459 
14460 	     If the note refers to an expression that is not a constant, we
14461 	     must also ignore the note since we cannot tell whether the
14462 	     equivalence is still true.  It might be possible to do
14463 	     slightly better than this (we only have a problem if I2DEST
14464 	     or I1DEST is present in the expression), but it doesn't
14465 	     seem worth the trouble.  */
14466 
14467 	  if (from_insn == i3
14468 	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14469 	    place = i3;
14470 	  break;
14471 
14472 	case REG_INC:
14473 	  /* These notes say something about how a register is used.  They must
14474 	     be present on any use of the register in I2 or I3.  */
14475 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14476 	    place = i3;
14477 
14478 	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14479 	    {
14480 	      if (place)
14481 		place2 = i2;
14482 	      else
14483 		place = i2;
14484 	    }
14485 	  break;
14486 
14487 	case REG_LABEL_TARGET:
14488 	case REG_LABEL_OPERAND:
14489 	  /* This can show up in several ways -- either directly in the
14490 	     pattern, or hidden off in the constant pool with (or without?)
14491 	     a REG_EQUAL note.  */
14492 	  /* ??? Ignore the without-reg_equal-note problem for now.  */
14493 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14494 	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14495 		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14496 		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14497 	    place = i3;
14498 
14499 	  if (i2
14500 	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14501 		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14502 		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14503 		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14504 	    {
14505 	      if (place)
14506 		place2 = i2;
14507 	      else
14508 		place = i2;
14509 	    }
14510 
14511 	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14512 	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14513 	     there.  */
14514 	  if (place && JUMP_P (place)
14515 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14516 	      && (JUMP_LABEL (place) == NULL
14517 		  || JUMP_LABEL (place) == XEXP (note, 0)))
14518 	    {
14519 	      rtx label = JUMP_LABEL (place);
14520 
14521 	      if (!label)
14522 		JUMP_LABEL (place) = XEXP (note, 0);
14523 	      else if (LABEL_P (label))
14524 		LABEL_NUSES (label)--;
14525 	    }
14526 
14527 	  if (place2 && JUMP_P (place2)
14528 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14529 	      && (JUMP_LABEL (place2) == NULL
14530 		  || JUMP_LABEL (place2) == XEXP (note, 0)))
14531 	    {
14532 	      rtx label = JUMP_LABEL (place2);
14533 
14534 	      if (!label)
14535 		JUMP_LABEL (place2) = XEXP (note, 0);
14536 	      else if (LABEL_P (label))
14537 		LABEL_NUSES (label)--;
14538 	      place2 = 0;
14539 	    }
14540 	  break;
14541 
14542 	case REG_NONNEG:
14543 	  /* This note says something about the value of a register prior
14544 	     to the execution of an insn.  It is too much trouble to see
14545 	     if the note is still correct in all situations.  It is better
14546 	     to simply delete it.  */
14547 	  break;
14548 
14549 	case REG_DEAD:
14550 	  /* If we replaced the right hand side of FROM_INSN with a
14551 	     REG_EQUAL note, the original use of the dying register
14552 	     will not have been combined into I3 and I2.  In such cases,
14553 	     FROM_INSN is guaranteed to be the first of the combined
14554 	     instructions, so we simply need to search back before
14555 	     FROM_INSN for the previous use or set of this register,
14556 	     then alter the notes there appropriately.
14557 
14558 	     If the register is used as an input in I3, it dies there.
14559 	     Similarly for I2, if it is nonzero and adjacent to I3.
14560 
14561 	     If the register is not used as an input in either I3 or I2
14562 	     and it is not one of the registers we were supposed to eliminate,
14563 	     there are two possibilities.  We might have a non-adjacent I2
14564 	     or we might have somehow eliminated an additional register
14565 	     from a computation.  For example, we might have had A & B where
14566 	     we discover that B will always be zero.  In this case we will
14567 	     eliminate the reference to A.
14568 
14569 	     In both cases, we must search to see if we can find a previous
14570 	     use of A and put the death note there.  */
14571 
14572 	  if (from_insn
14573 	      && from_insn == i2mod
14574 	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14575 	    tem_insn = from_insn;
14576 	  else
14577 	    {
14578 	      if (from_insn
14579 		  && CALL_P (from_insn)
14580 		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14581 		place = from_insn;
14582 	      else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14583 		{
14584 		  /* If the new I2 sets the same register that is marked
14585 		     dead in the note, we do not in general know where to
14586 		     put the note.  One important case we _can_ handle is
14587 		     when the note comes from I3.  */
14588 		  if (from_insn == i3)
14589 		    place = i3;
14590 		  else
14591 		    break;
14592 		}
14593 	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14594 		place = i3;
14595 	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14596 		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14597 		place = i2;
14598 	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14599 			&& !(i2mod
14600 			     && reg_overlap_mentioned_p (XEXP (note, 0),
14601 							 i2mod_old_rhs)))
14602 		       || rtx_equal_p (XEXP (note, 0), elim_i1)
14603 		       || rtx_equal_p (XEXP (note, 0), elim_i0))
14604 		break;
14605 	      tem_insn = i3;
14606 	    }
14607 
14608 	  if (place == 0)
14609 	    {
14610 	      basic_block bb = this_basic_block;
14611 
14612 	      for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14613 		{
14614 		  if (!NONDEBUG_INSN_P (tem_insn))
14615 		    {
14616 		      if (tem_insn == BB_HEAD (bb))
14617 			break;
14618 		      continue;
14619 		    }
14620 
14621 		  /* If the register is being set at TEM_INSN, see if that is all
14622 		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
14623 		     into a REG_UNUSED note instead. Don't delete sets to
14624 		     global register vars.  */
14625 		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14626 		       || !global_regs[REGNO (XEXP (note, 0))])
14627 		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14628 		    {
14629 		      rtx set = single_set (tem_insn);
14630 		      rtx inner_dest = 0;
14631 		      rtx_insn *cc0_setter = NULL;
14632 
14633 		      if (set != 0)
14634 			for (inner_dest = SET_DEST (set);
14635 			     (GET_CODE (inner_dest) == STRICT_LOW_PART
14636 			      || GET_CODE (inner_dest) == SUBREG
14637 			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
14638 			     inner_dest = XEXP (inner_dest, 0))
14639 			  ;
14640 
14641 		      /* Verify that it was the set, and not a clobber that
14642 			 modified the register.
14643 
14644 			 CC0 targets must be careful to maintain setter/user
14645 			 pairs.  If we cannot delete the setter due to side
14646 			 effects, mark the user with an UNUSED note instead
14647 			 of deleting it.  */
14648 
14649 		      if (set != 0 && ! side_effects_p (SET_SRC (set))
14650 			  && rtx_equal_p (XEXP (note, 0), inner_dest)
14651 			  && (!HAVE_cc0
14652 			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14653 				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14654 				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14655 			{
14656 			  /* Move the notes and links of TEM_INSN elsewhere.
14657 			     This might delete other dead insns recursively.
14658 			     First set the pattern to something that won't use
14659 			     any register.  */
14660 			  rtx old_notes = REG_NOTES (tem_insn);
14661 
14662 			  PATTERN (tem_insn) = pc_rtx;
14663 			  REG_NOTES (tem_insn) = NULL;
14664 
14665 			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14666 					    NULL_RTX, NULL_RTX, NULL_RTX);
14667 			  distribute_links (LOG_LINKS (tem_insn));
14668 
14669 			  unsigned int regno = REGNO (XEXP (note, 0));
14670 			  reg_stat_type *rsp = &reg_stat[regno];
14671 			  if (rsp->last_set == tem_insn)
14672 			    record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14673 
14674 			  SET_INSN_DELETED (tem_insn);
14675 			  if (tem_insn == i2)
14676 			    i2 = NULL;
14677 
14678 			  /* Delete the setter too.  */
14679 			  if (cc0_setter)
14680 			    {
14681 			      PATTERN (cc0_setter) = pc_rtx;
14682 			      old_notes = REG_NOTES (cc0_setter);
14683 			      REG_NOTES (cc0_setter) = NULL;
14684 
14685 			      distribute_notes (old_notes, cc0_setter,
14686 						cc0_setter, NULL,
14687 						NULL_RTX, NULL_RTX, NULL_RTX);
14688 			      distribute_links (LOG_LINKS (cc0_setter));
14689 
14690 			      SET_INSN_DELETED (cc0_setter);
14691 			      if (cc0_setter == i2)
14692 				i2 = NULL;
14693 			    }
14694 			}
14695 		      else
14696 			{
14697 			  PUT_REG_NOTE_KIND (note, REG_UNUSED);
14698 
14699 			  /*  If there isn't already a REG_UNUSED note, put one
14700 			      here.  Do not place a REG_DEAD note, even if
14701 			      the register is also used here; that would not
14702 			      match the algorithm used in lifetime analysis
14703 			      and can cause the consistency check in the
14704 			      scheduler to fail.  */
14705 			  if (! find_regno_note (tem_insn, REG_UNUSED,
14706 						 REGNO (XEXP (note, 0))))
14707 			    place = tem_insn;
14708 			  break;
14709 			}
14710 		    }
14711 		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14712 			   || (CALL_P (tem_insn)
14713 			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14714 		    {
14715 		      place = tem_insn;
14716 
14717 		      /* If we are doing a 3->2 combination, and we have a
14718 			 register which formerly died in i3 and was not used
14719 			 by i2, which now no longer dies in i3 and is used in
14720 			 i2 but does not die in i2, and place is between i2
14721 			 and i3, then we may need to move a link from place to
14722 			 i2.  */
14723 		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14724 			  && from_insn
14725 			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14726 			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14727 			{
14728 			  struct insn_link *links = LOG_LINKS (place);
14729 			  LOG_LINKS (place) = NULL;
14730 			  distribute_links (links);
14731 			}
14732 		      break;
14733 		    }
14734 
14735 		  if (tem_insn == BB_HEAD (bb))
14736 		    break;
14737 		}
14738 
14739 	    }
14740 
14741 	  /* If the register is set or already dead at PLACE, we needn't do
14742 	     anything with this note if it is still a REG_DEAD note.
14743 	     We check here if it is set at all, not if it is totally replaced,
14744 	     which is what `dead_or_set_p' checks, so also check for it being
14745 	     set partially.  */
14746 
14747 	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
14748 	    {
14749 	      unsigned int regno = REGNO (XEXP (note, 0));
14750 	      reg_stat_type *rsp = &reg_stat[regno];
14751 
14752 	      if (dead_or_set_p (place, XEXP (note, 0))
14753 		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14754 		{
14755 		  /* Unless the register previously died in PLACE, clear
14756 		     last_death.  [I no longer understand why this is
14757 		     being done.] */
14758 		  if (rsp->last_death != place)
14759 		    rsp->last_death = 0;
14760 		  place = 0;
14761 		}
14762 	      else
14763 		rsp->last_death = place;
14764 
14765 	      /* If this is a death note for a hard reg that is occupying
14766 		 multiple registers, ensure that we are still using all
14767 		 parts of the object.  If we find a piece of the object
14768 		 that is unused, we must arrange for an appropriate REG_DEAD
14769 		 note to be added for it.  However, we can't just emit a USE
14770 		 and tag the note to it, since the register might actually
14771 		 be dead; so we recurse, and the recursive call then finds
14772 		 the previous insn that used this register.  */
14773 
14774 	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
14775 		{
14776 		  unsigned int endregno = END_REGNO (XEXP (note, 0));
14777 		  bool all_used = true;
14778 		  unsigned int i;
14779 
14780 		  for (i = regno; i < endregno; i++)
14781 		    if ((! refers_to_regno_p (i, PATTERN (place))
14782 			 && ! find_regno_fusage (place, USE, i))
14783 			|| dead_or_set_regno_p (place, i))
14784 		      {
14785 			all_used = false;
14786 			break;
14787 		      }
14788 
14789 		  if (! all_used)
14790 		    {
14791 		      /* Put only REG_DEAD notes for pieces that are
14792 			 not already dead or set.  */
14793 
14794 		      for (i = regno; i < endregno;
14795 			   i += hard_regno_nregs (i, reg_raw_mode[i]))
14796 			{
14797 			  rtx piece = regno_reg_rtx[i];
14798 			  basic_block bb = this_basic_block;
14799 
14800 			  if (! dead_or_set_p (place, piece)
14801 			      && ! reg_bitfield_target_p (piece,
14802 							  PATTERN (place)))
14803 			    {
14804 			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
14805 							     NULL_RTX);
14806 
14807 			      distribute_notes (new_note, place, place,
14808 						NULL, NULL_RTX, NULL_RTX,
14809 						NULL_RTX);
14810 			    }
14811 			  else if (! refers_to_regno_p (i, PATTERN (place))
14812 				   && ! find_regno_fusage (place, USE, i))
14813 			    for (tem_insn = PREV_INSN (place); ;
14814 				 tem_insn = PREV_INSN (tem_insn))
14815 			      {
14816 				if (!NONDEBUG_INSN_P (tem_insn))
14817 				  {
14818 				    if (tem_insn == BB_HEAD (bb))
14819 			 	      break;
14820 				    continue;
14821 				  }
14822 				if (dead_or_set_p (tem_insn, piece)
14823 				    || reg_bitfield_target_p (piece,
14824 							      PATTERN (tem_insn)))
14825 				  {
14826 				    add_reg_note (tem_insn, REG_UNUSED, piece);
14827 				    break;
14828 				  }
14829 			      }
14830 			}
14831 
14832 		      place = 0;
14833 		    }
14834 		}
14835 	    }
14836 	  break;
14837 
14838 	default:
14839 	  /* Any other notes should not be present at this point in the
14840 	     compilation.  */
14841 	  gcc_unreachable ();
14842 	}
14843 
14844       if (place)
14845 	{
14846 	  XEXP (note, 1) = REG_NOTES (place);
14847 	  REG_NOTES (place) = note;
14848 
14849 	  /* Set added_notes_insn to the earliest insn we added a note to.  */
14850 	  if (added_notes_insn == 0
14851 	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
14852 	    added_notes_insn = place;
14853 	}
14854 
14855       if (place2)
14856 	{
14857 	  add_shallow_copy_of_reg_note (place2, note);
14858 
14859 	  /* Set added_notes_insn to the earliest insn we added a note to.  */
14860 	  if (added_notes_insn == 0
14861 	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
14862 	    added_notes_insn = place2;
14863 	}
14864     }
14865 }
14866 
14867 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14868    I3, I2, and I1 to new locations.  This is also called to add a link
14869    pointing at I3 when I3's destination is changed.  */
14870 
14871 static void
14872 distribute_links (struct insn_link *links)
14873 {
14874   struct insn_link *link, *next_link;
14875 
14876   for (link = links; link; link = next_link)
14877     {
14878       rtx_insn *place = 0;
14879       rtx_insn *insn;
14880       rtx set, reg;
14881 
14882       next_link = link->next;
14883 
14884       /* If the insn that this link points to is a NOTE, ignore it.  */
14885       if (NOTE_P (link->insn))
14886 	continue;
14887 
14888       set = 0;
14889       rtx pat = PATTERN (link->insn);
14890       if (GET_CODE (pat) == SET)
14891 	set = pat;
14892       else if (GET_CODE (pat) == PARALLEL)
14893 	{
14894 	  int i;
14895 	  for (i = 0; i < XVECLEN (pat, 0); i++)
14896 	    {
14897 	      set = XVECEXP (pat, 0, i);
14898 	      if (GET_CODE (set) != SET)
14899 		continue;
14900 
14901 	      reg = SET_DEST (set);
14902 	      while (GET_CODE (reg) == ZERO_EXTRACT
14903 		     || GET_CODE (reg) == STRICT_LOW_PART
14904 		     || GET_CODE (reg) == SUBREG)
14905 		reg = XEXP (reg, 0);
14906 
14907 	      if (!REG_P (reg))
14908 		continue;
14909 
14910 	      if (REGNO (reg) == link->regno)
14911 		break;
14912 	    }
14913 	  if (i == XVECLEN (pat, 0))
14914 	    continue;
14915 	}
14916       else
14917 	continue;
14918 
14919       reg = SET_DEST (set);
14920 
14921       while (GET_CODE (reg) == ZERO_EXTRACT
14922 	     || GET_CODE (reg) == STRICT_LOW_PART
14923 	     || GET_CODE (reg) == SUBREG)
14924 	reg = XEXP (reg, 0);
14925 
14926       if (reg == pc_rtx)
14927 	continue;
14928 
14929       /* A LOG_LINK is defined as being placed on the first insn that uses
14930 	 a register and points to the insn that sets the register.  Start
14931 	 searching at the next insn after the target of the link and stop
14932 	 when we reach a set of the register or the end of the basic block.
14933 
14934 	 Note that this correctly handles the link that used to point from
14935 	 I3 to I2.  Also note that not much searching is typically done here
14936 	 since most links don't point very far away.  */
14937 
14938       for (insn = NEXT_INSN (link->insn);
14939 	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14940 		     || BB_HEAD (this_basic_block->next_bb) != insn));
14941 	   insn = NEXT_INSN (insn))
14942 	if (DEBUG_INSN_P (insn))
14943 	  continue;
14944 	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14945 	  {
14946 	    if (reg_referenced_p (reg, PATTERN (insn)))
14947 	      place = insn;
14948 	    break;
14949 	  }
14950 	else if (CALL_P (insn)
14951 		 && find_reg_fusage (insn, USE, reg))
14952 	  {
14953 	    place = insn;
14954 	    break;
14955 	  }
14956 	else if (INSN_P (insn) && reg_set_p (reg, insn))
14957 	  break;
14958 
14959       /* If we found a place to put the link, place it there unless there
14960 	 is already a link to the same insn as LINK at that point.  */
14961 
14962       if (place)
14963 	{
14964 	  struct insn_link *link2;
14965 
14966 	  FOR_EACH_LOG_LINK (link2, place)
14967 	    if (link2->insn == link->insn && link2->regno == link->regno)
14968 	      break;
14969 
14970 	  if (link2 == NULL)
14971 	    {
14972 	      link->next = LOG_LINKS (place);
14973 	      LOG_LINKS (place) = link;
14974 
14975 	      /* Set added_links_insn to the earliest insn we added a
14976 		 link to.  */
14977 	      if (added_links_insn == 0
14978 		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14979 		added_links_insn = place;
14980 	    }
14981 	}
14982     }
14983 }
14984 
14985 /* Check for any register or memory mentioned in EQUIV that is not
14986    mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
14987    of EXPR where some registers may have been replaced by constants.  */
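/* For example (invented pseudo numbers): if EXPR is
   (plus:SI (reg:SI 100) (reg:SI 101)), then the candidate
   (plus:SI (reg:SI 100) (const_int 4)) mentions no register or memory that
   EXPR lacks, so unmentioned_reg_p returns false and the candidate is an
   acceptable specialization; (plus:SI (reg:SI 102) (const_int 4)) would make
   it return true.  */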
14988 
14989 static bool
14990 unmentioned_reg_p (rtx equiv, rtx expr)
14991 {
14992   subrtx_iterator::array_type array;
14993   FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14994     {
14995       const_rtx x = *iter;
14996       if ((REG_P (x) || MEM_P (x))
14997 	  && !reg_mentioned_p (x, expr))
14998 	return true;
14999     }
15000   return false;
15001 }
15002 
15003 DEBUG_FUNCTION void
15004 dump_combine_stats (FILE *file)
15005 {
15006   fprintf
15007     (file,
15008      ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
15009      combine_attempts, combine_merges, combine_extras, combine_successes);
15010 }
15011 
15012 void
15013 dump_combine_total_stats (FILE *file)
15014 {
15015   fprintf
15016     (file,
15017      "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
15018      total_attempts, total_merges, total_extras, total_successes);
15019 }
15020 
15021 /* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because
15022    the reg-to-reg copy can usefully combine with later instructions, but we
15023    do not want to combine the hard reg into later instructions, for that
15024    restricts register allocation.  */
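/* A minimal sketch of the transformation, with invented register numbers
   (the hard register shown is purely illustrative):

       (set (reg:SI 100) (reg:SI 1 dx))

   becomes

       (set (reg:SI 101) (reg:SI 1 dx))
       (set (reg:SI 100) (reg:SI 101))

   so later uses of reg 100 can combine with the pseudo-to-pseudo copy while
   the hard register stays confined to the first insn.  */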
15025 static void
15026 make_more_copies (void)
15027 {
15028   basic_block bb;
15029 
15030   FOR_EACH_BB_FN (bb, cfun)
15031     {
15032       rtx_insn *insn;
15033 
15034       FOR_BB_INSNS (bb, insn)
15035         {
15036           if (!NONDEBUG_INSN_P (insn))
15037             continue;
15038 
15039 	  rtx set = single_set (insn);
15040 	  if (!set)
15041 	    continue;
15042 
15043 	  rtx dest = SET_DEST (set);
15044 	  if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
15045 	    continue;
15046 
15047 	  rtx src = SET_SRC (set);
15048 	  if (!(REG_P (src) && HARD_REGISTER_P (src)))
15049 	    continue;
15050 	  if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
15051 	    continue;
15052 
15053 	  rtx new_reg = gen_reg_rtx (GET_MODE (dest));
15054 	  rtx_insn *new_insn = gen_move_insn (new_reg, src);
15055 	  SET_SRC (set) = new_reg;
15056 	  emit_insn_before (new_insn, insn);
15057 	  df_insn_rescan (insn);
15058 	}
15059     }
15060 }
15061 
15062 /* Try combining insns through substitution.  */
15063 static unsigned int
15064 rest_of_handle_combine (void)
15065 {
15066   make_more_copies ();
15067 
15068   df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
15069   df_note_add_problem ();
15070   df_analyze ();
15071 
15072   regstat_init_n_sets_and_refs ();
15073   reg_n_sets_max = max_reg_num ();
15074 
15075   int rebuild_jump_labels_after_combine
15076     = combine_instructions (get_insns (), max_reg_num ());
15077 
15078   /* Combining insns may have turned an indirect jump into a
15079      direct jump.  Rebuild the JUMP_LABEL fields of jumping
15080      instructions.  */
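  /* For instance, substituting (set (reg:DI 100) (label_ref L)) into a
     following (set (pc) (reg:DI 100)) yields a direct jump to L
     (illustrative RTL; modes and register numbers are invented).  */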
15081   if (rebuild_jump_labels_after_combine)
15082     {
15083       if (dom_info_available_p (CDI_DOMINATORS))
15084 	free_dominance_info (CDI_DOMINATORS);
15085       timevar_push (TV_JUMP);
15086       rebuild_jump_labels (get_insns ());
15087       cleanup_cfg (0);
15088       timevar_pop (TV_JUMP);
15089     }
15090 
15091   regstat_free_n_sets_and_refs ();
15092   return 0;
15093 }
15094 
15095 namespace {
15096 
15097 const pass_data pass_data_combine =
15098 {
15099   RTL_PASS, /* type */
15100   "combine", /* name */
15101   OPTGROUP_NONE, /* optinfo_flags */
15102   TV_COMBINE, /* tv_id */
15103   PROP_cfglayout, /* properties_required */
15104   0, /* properties_provided */
15105   0, /* properties_destroyed */
15106   0, /* todo_flags_start */
15107   TODO_df_finish, /* todo_flags_finish */
15108 };
15109 
15110 class pass_combine : public rtl_opt_pass
15111 {
15112 public:
15113   pass_combine (gcc::context *ctxt)
15114     : rtl_opt_pass (pass_data_combine, ctxt)
15115   {}
15116 
15117   /* opt_pass methods: */
15118   virtual bool gate (function *) { return (optimize > 0); }
15119   virtual unsigned int execute (function *)
15120     {
15121       return rest_of_handle_combine ();
15122     }
15123 
15124 }; // class pass_combine
15125 
15126 } // anon namespace
15127 
15128 rtl_opt_pass *
15129 make_pass_combine (gcc::context *ctxt)
15130 {
15131   return new pass_combine (ctxt);
15132 }
15133