xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/combine.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /* Optimize by combining instructions for GNU compiler.
2    Copyright (C) 1987-2017 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21    Portable Optimizer, but redone to work on our list-structured
22    representation for RTL instead of their string representation.
23 
24    The LOG_LINKS of each insn identify the most recent assignment
25    to each REG used in the insn.  It is a list of previous insns,
26    each of which contains a SET for a REG that is used in this insn
27    and not used or set in between.  LOG_LINKs never cross basic blocks.
28    They were set up by the preceding pass (lifetime analysis).
29 
30    We try to combine each pair of insns joined by a logical link.
31    We also try to combine triplets of insns A, B and C when C has
32    a link back to B and B has a link back to A.  Likewise for a
33    small number of quadruplets of insns A, B, C and D for which
34    there's high likelihood of success.
35 
36    LOG_LINKS do not record links for uses of CC0.  They don't need
37    to, because the insn that sets CC0 is always immediately before
38    the insn that tests it.  So we always regard a branch insn as
39    having a logical link to the preceding insn.  The same is true
40    for an insn explicitly using CC0.
41 
42    We check (with use_crosses_set_p) to avoid combining in such a way
43    as to move a computation to a place where its value would be different.
44 
45    Combination is done by mathematically substituting the previous
46    insn(s) values for the regs they set into the expressions in
47    the later insns that refer to these regs.  If the result is a valid insn
48    for our target machine, according to the machine description,
49    we install it, delete the earlier insns, and update the data flow
50    information (LOG_LINKS and REG_NOTES) for what we did.
51 
52    There are a few exceptions where the dataflow information isn't
53    completely updated (however this is only a local issue since it is
54    regenerated before the next pass that uses it):
55 
56    - reg_live_length is not updated
57    - reg_n_refs is not adjusted in the rare case when a register is
58      no longer required in a computation
59    - there are extremely rare cases (see distribute_notes) when a
60      REG_DEAD note is lost
61    - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62      removed because there is no way to know which register it was
63      linking
64 
65    To simplify substitution, we combine only when the earlier insn(s)
66    consist of only a single assignment.  To simplify updating afterward,
67    we never combine when a subroutine call appears in the middle.
68 
69    Since we do not represent assignments to CC0 explicitly except when that
70    is all an insn does, there is no LOG_LINKS entry in an insn that uses
71    the condition code for the insn that set the condition code.
72    Fortunately, these two insns must be consecutive.
73    Therefore, every JUMP_INSN is taken to have an implicit logical link
74    to the preceding insn.  This is not quite right, since non-jumps can
75    also use the condition code; but in practice such insns would not
76    combine anyway.  */
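
/* As an illustrative sketch only (the register numbers are made up and do not
   refer to any particular target), a typical two-insn combination replaces

	(set (reg:SI 116) (plus:SI (reg:SI 115) (const_int 4)))
	(set (reg:SI 117) (mem:SI (reg:SI 116)))

   with the single insn

	(set (reg:SI 117) (mem:SI (plus:SI (reg:SI 115) (const_int 4))))

   provided the combined pattern is recognized by the machine description
   and (reg:SI 116) is not otherwise used.  */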
77 
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
107 
108 /* Number of attempts to combine instructions in this function.  */
109 
110 static int combine_attempts;
111 
112 /* Number of attempts that got as far as substitution in this function.  */
113 
114 static int combine_merges;
115 
116 /* Number of instructions combined with added SETs in this function.  */
117 
118 static int combine_extras;
119 
120 /* Number of instructions combined in this function.  */
121 
122 static int combine_successes;
123 
124 /* Totals over entire compilation.  */
125 
126 static int total_attempts, total_merges, total_extras, total_successes;
127 
128 /* combine_instructions may try to replace the right hand side of the
129    second instruction with the value of an associated REG_EQUAL note
130    before throwing it at try_combine.  That is problematic when there
131    is a REG_DEAD note for a register used in the old right hand side
132    and can cause distribute_notes to do wrong things.  This is the
133    second instruction if it has been so modified, null otherwise.  */
134 
135 static rtx_insn *i2mod;
136 
137 /* When I2MOD is nonnull, this is a copy of the old right hand side.  */
138 
139 static rtx i2mod_old_rhs;
140 
141 /* When I2MOD is nonnull, this is a copy of the new right hand side.  */
142 
143 static rtx i2mod_new_rhs;
144 
145 struct reg_stat_type {
146   /* Record last point of death of (hard or pseudo) register n.  */
147   rtx_insn			*last_death;
148 
149   /* Record last point of modification of (hard or pseudo) register n.  */
150   rtx_insn			*last_set;
151 
152   /* The next group of fields allows the recording of the last value assigned
153      to (hard or pseudo) register n.  We use this information to see if an
154      operation being processed is redundant given a prior operation performed
155      on the register.  For example, an `and' with a constant is redundant if
156      all the zero bits are already known to be turned off.
157 
158      We use an approach similar to that used by cse, but change it in the
159      following ways:
160 
161      (1) We do not want to reinitialize at each label.
162      (2) It is useful, but not critical, to know the actual value assigned
163 	 to a register.  Often just its form is helpful.
164 
165      Therefore, we maintain the following fields:
166 
167      last_set_value		the last value assigned
168      last_set_label		records the value of label_tick when the
169 				register was assigned
170      last_set_table_tick	records the value of label_tick when a
171 				value using the register is assigned
172      last_set_invalid		set to nonzero when it is not valid
173 				to use the value of this register in some
174 				register's value
175 
176      To understand the usage of these tables, it is important to understand
177      the distinction between the value in last_set_value being valid and
178      the register being validly contained in some other expression in the
179      table.
180 
181      (The next two parameters are out of date).
182 
183      reg_stat[i].last_set_value is valid if it is nonzero, and either
184      reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
185 
186      Register I may validly appear in any expression returned for the value
187      of another register if reg_n_sets[i] is 1.  It may also appear in the
188      value for register J if reg_stat[j].last_set_invalid is zero, or
189      reg_stat[i].last_set_label < reg_stat[j].last_set_label.
190 
191      If an expression is found in the table containing a register which may
192      not validly appear in an expression, the register is replaced by
193      something that won't match, (clobber (const_int 0)).  */
194 
195   /* Record last value assigned to (hard or pseudo) register n.  */
196 
197   rtx				last_set_value;
198 
199   /* Record the value of label_tick when an expression involving register n
200      is placed in last_set_value.  */
201 
202   int				last_set_table_tick;
203 
204   /* Record the value of label_tick when the value for register n is placed in
205      last_set_value.  */
206 
207   int				last_set_label;
208 
209   /* These fields are maintained in parallel with last_set_value and are
210      used to store the mode in which the register was last set, the bits
211      that were known to be zero when it was last set, and the number of
212      sign bit copies it was known to have when it was last set.  */
213 
214   unsigned HOST_WIDE_INT	last_set_nonzero_bits;
215   char				last_set_sign_bit_copies;
216   ENUM_BITFIELD(machine_mode)	last_set_mode : 8;
217 
218   /* Set nonzero if references to register n in expressions should not be
219      used.  last_set_invalid is set nonzero when this register is being
220      assigned to and last_set_table_tick == label_tick.  */
221 
222   char				last_set_invalid;
223 
224   /* Some registers that are set more than once and used in more than one
225      basic block are nevertheless always set in similar ways.  For example,
226      a QImode register may be loaded from memory in two places on a machine
227      where byte loads zero extend.
228 
229      We record in the following fields if a register has some leading bits
230      that are always equal to the sign bit, and what we know about the
231      nonzero bits of a register, specifically which bits are known to be
232      zero.
233 
234      If an entry is zero, it means that we don't know anything special.  */
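
  /* For example (an illustrative sketch, not describing any particular
     target): on a machine where byte loads zero extend, a pseudo that is
     only ever loaded from memory in QImode ends up with
     nonzero_bits == 0xff, which lets a later (and (reg) (const_int 255))
     be recognized as redundant.  */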
235 
236   unsigned char			sign_bit_copies;
237 
238   unsigned HOST_WIDE_INT	nonzero_bits;
239 
240   /* Record the value of the label_tick when the last truncation
241      happened.  The field truncated_to_mode is only valid if
242      truncation_label == label_tick.  */
243 
244   int				truncation_label;
245 
246   /* Record the last truncation seen for this register.  If truncation
247      is not a nop to this mode we might be able to save an explicit
248      truncation if we know that value already contains a truncated
249      value.  */
250 
251   ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
252 };
253 
254 
255 static vec<reg_stat_type> reg_stat;
256 
257 /* One plus the highest pseudo for which we track REG_N_SETS.
258    regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
259    but during combine_split_insns new pseudos can be created.  As we don't have
260    updated DF information in that case, it is hard to initialize the array
261    after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
262    so instead of growing the arrays, just assume all newly created pseudos
263    during combine might be set multiple times.  */
264 
265 static unsigned int reg_n_sets_max;
266 
267 /* Record the luid of the last insn that invalidated memory
268    (anything that writes memory, and subroutine calls, but not pushes).  */
269 
270 static int mem_last_set;
271 
272 /* Record the luid of the last CALL_INSN
273    so we can tell whether a potential combination crosses any calls.  */
274 
275 static int last_call_luid;
276 
277 /* When `subst' is called, this is the insn that is being modified
278    (by combining in a previous insn).  The PATTERN of this insn
279    is still the old pattern partially modified and it should not be
280    looked at, but this may be used to examine the successors of the insn
281    to judge whether a simplification is valid.  */
282 
283 static rtx_insn *subst_insn;
284 
285 /* This is the lowest LUID that `subst' is currently dealing with.
286    get_last_value will not return a value if the register was set at or
287    after this LUID.  If not for this mechanism, we could get confused if
288    I2 or I1 in try_combine were an insn that used the old value of a register
289    to obtain a new value.  In that case, we might erroneously get the
290    new value of the register when we wanted the old one.  */
291 
292 static int subst_low_luid;
293 
294 /* This contains any hard registers that are used in newpat; reg_dead_at_p
295    must consider all these registers to be always live.  */
296 
297 static HARD_REG_SET newpat_used_regs;
298 
299 /* This is an insn to which a LOG_LINKS entry has been added.  If this
300    insn is earlier than I2 or I3, combine should rescan starting at
301    that location.  */
302 
303 static rtx_insn *added_links_insn;
304 
305 /* Basic block in which we are performing combines.  */
306 static basic_block this_basic_block;
307 static bool optimize_this_for_speed_p;
308 
309 
310 /* Length of the currently allocated uid_insn_cost array.  */
311 
312 static int max_uid_known;
313 
314 /* The following array records the insn_rtx_cost for every insn
315    in the instruction stream.  */
316 
317 static int *uid_insn_cost;
318 
319 /* The following array records the LOG_LINKS for every insn in the
320    instruction stream as struct insn_link pointers.  */
321 
322 struct insn_link {
323   rtx_insn *insn;
324   unsigned int regno;
325   struct insn_link *next;
326 };
327 
328 static struct insn_link **uid_log_links;
329 
330 static inline int
331 insn_uid_check (const_rtx insn)
332 {
333   int uid = INSN_UID (insn);
334   gcc_checking_assert (uid <= max_uid_known);
335   return uid;
336 }
337 
338 #define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
339 #define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])
340 
341 #define FOR_EACH_LOG_LINK(L, INSN)				\
342   for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
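
/* For example (an illustrative sketch only), walking the links of INSN to
   see whether some earlier insn OTHER feeds it might look like:

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  if (link->insn == other)
	    break;

   insn_a_feeds_b below does essentially this walk.  */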
343 
344 /* Links for LOG_LINKS are allocated from this obstack.  */
345 
346 static struct obstack insn_link_obstack;
347 
348 /* Allocate a link.  */
349 
350 static inline struct insn_link *
351 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
352 {
353   struct insn_link *l
354     = (struct insn_link *) obstack_alloc (&insn_link_obstack,
355 					  sizeof (struct insn_link));
356   l->insn = insn;
357   l->regno = regno;
358   l->next = next;
359   return l;
360 }
361 
362 /* Incremented for each basic block.  */
363 
364 static int label_tick;
365 
366 /* Reset to label_tick for each extended basic block in scanning order.  */
367 
368 static int label_tick_ebb_start;
369 
370 /* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
371    largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */
372 
373 static machine_mode nonzero_bits_mode;
374 
375 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
376    be safely used.  It is zero while computing them and after combine has
377    completed.  This former test prevents propagating values based on
378    previously set values, which can be incorrect if a variable is modified
379    in a loop.  */
380 
381 static int nonzero_sign_valid;
382 
383 
384 /* Record one modification to rtl structure
385    to be undone by storing old_contents into *where.  */
386 
387 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
388 
389 struct undo
390 {
391   struct undo *next;
392   enum undo_kind kind;
393   union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
394   union { rtx *r; int *i; struct insn_link **l; } where;
395 };
396 
397 /* Record a bunch of changes to be undone; undobuf.undos holds the list of
398    recorded changes, and undobuf.frees holds entries available for reuse.
399 
400    other_insn is nonzero if we have modified some other insn in the process
401    of working on subst_insn.  It must be verified too.  */
402 
403 struct undobuf
404 {
405   struct undo *undos;
406   struct undo *frees;
407   rtx_insn *other_insn;
408 };
409 
410 static struct undobuf undobuf;
411 
412 /* Number of times the pseudo being substituted for
413    was found and replaced.  */
414 
415 static int n_occurrences;
416 
417 static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
418 					 machine_mode,
419 					 unsigned HOST_WIDE_INT,
420 					 unsigned HOST_WIDE_INT *);
421 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode, const_rtx,
422 						machine_mode,
423 						unsigned int, unsigned int *);
424 static void do_SUBST (rtx *, rtx);
425 static void do_SUBST_INT (int *, int);
426 static void init_reg_last (void);
427 static void setup_incoming_promotions (rtx_insn *);
428 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
429 static int cant_combine_insn_p (rtx_insn *);
430 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
431 			  rtx_insn *, rtx_insn *, rtx *, rtx *);
432 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
433 static int contains_muldiv (rtx);
434 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
435 			      int *, rtx_insn *);
436 static void undo_all (void);
437 static void undo_commit (void);
438 static rtx *find_split_point (rtx *, rtx_insn *, bool);
439 static rtx subst (rtx, rtx, rtx, int, int, int);
440 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
441 static rtx simplify_if_then_else (rtx);
442 static rtx simplify_set (rtx);
443 static rtx simplify_logical (rtx);
444 static rtx expand_compound_operation (rtx);
445 static const_rtx expand_field_assignment (const_rtx);
446 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
447 			    rtx, unsigned HOST_WIDE_INT, int, int, int);
448 static rtx extract_left_shift (rtx, int);
449 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
450 			      unsigned HOST_WIDE_INT *);
451 static rtx canon_reg_for_combine (rtx, rtx);
452 static rtx force_to_mode (rtx, machine_mode,
453 			  unsigned HOST_WIDE_INT, int);
454 static rtx if_then_else_cond (rtx, rtx *, rtx *);
455 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
456 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
457 static rtx make_field_assignment (rtx);
458 static rtx apply_distributive_law (rtx);
459 static rtx distribute_and_simplify_rtx (rtx, int);
460 static rtx simplify_and_const_int_1 (machine_mode, rtx,
461 				     unsigned HOST_WIDE_INT);
462 static rtx simplify_and_const_int (rtx, machine_mode, rtx,
463 				   unsigned HOST_WIDE_INT);
464 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
465 			    HOST_WIDE_INT, machine_mode, int *);
466 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
467 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
468 				 int);
469 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
470 static rtx gen_lowpart_for_combine (machine_mode, rtx);
471 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
472 					     rtx, rtx *);
473 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
474 static void update_table_tick (rtx);
475 static void record_value_for_reg (rtx, rtx_insn *, rtx);
476 static void check_promoted_subreg (rtx_insn *, rtx);
477 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
478 static void record_dead_and_set_regs (rtx_insn *);
479 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
480 static rtx get_last_value (const_rtx);
481 static int use_crosses_set_p (const_rtx, int);
482 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
483 static int reg_dead_at_p (rtx, rtx_insn *);
484 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
485 static int reg_bitfield_target_p (rtx, rtx);
486 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
487 static void distribute_links (struct insn_link *);
488 static void mark_used_regs_combine (rtx);
489 static void record_promoted_value (rtx_insn *, rtx);
490 static bool unmentioned_reg_p (rtx, rtx);
491 static void record_truncated_values (rtx *, void *);
492 static bool reg_truncated_to_mode (machine_mode, const_rtx);
493 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
494 
495 
496 /* It is not safe to use ordinary gen_lowpart in combine.
497    See comments in gen_lowpart_for_combine.  */
498 #undef RTL_HOOKS_GEN_LOWPART
499 #define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine
500 
501 /* Our implementation of gen_lowpart never emits a new pseudo.  */
502 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
503 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine
504 
505 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
506 #define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine
507 
508 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
509 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine
510 
511 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
512 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode
513 
514 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
515 
516 
517 /* Convenience wrapper for the canonicalize_comparison target hook.
518    Target hooks cannot use enum rtx_code.  */
519 static inline void
520 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
521 				bool op0_preserve_value)
522 {
523   int code_int = (int)*code;
524   targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
525   *code = (enum rtx_code)code_int;
526 }
527 
528 /* Try to split PATTERN found in INSN.  This returns NULL_RTX if
529    PATTERN cannot be split.  Otherwise, it returns an insn sequence.
530    This is a wrapper around split_insns which ensures that the
531    reg_stat vector is made larger if the splitter creates a new
532    register.  */
533 
534 static rtx_insn *
535 combine_split_insns (rtx pattern, rtx_insn *insn)
536 {
537   rtx_insn *ret;
538   unsigned int nregs;
539 
540   ret = split_insns (pattern, insn);
541   nregs = max_reg_num ();
542   if (nregs > reg_stat.length ())
543     reg_stat.safe_grow_cleared (nregs);
544   return ret;
545 }
546 
547 /* This is used by find_single_use to locate an rtx in LOC that
548    contains exactly one use of DEST, which is typically either a REG
549    or CC0.  It returns a pointer to the innermost rtx expression
550    containing DEST.  Appearances of DEST that are being used to
551    totally replace it are not counted.  */
552 
553 static rtx *
554 find_single_use_1 (rtx dest, rtx *loc)
555 {
556   rtx x = *loc;
557   enum rtx_code code = GET_CODE (x);
558   rtx *result = NULL;
559   rtx *this_result;
560   int i;
561   const char *fmt;
562 
563   switch (code)
564     {
565     case CONST:
566     case LABEL_REF:
567     case SYMBOL_REF:
568     CASE_CONST_ANY:
569     case CLOBBER:
570       return 0;
571 
572     case SET:
573       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
574 	 of a REG that occupies all of the REG, the insn uses DEST if
575 	 it is mentioned in the destination or the source.  Otherwise, we
576 	 need only check the source.  */
577       if (GET_CODE (SET_DEST (x)) != CC0
578 	  && GET_CODE (SET_DEST (x)) != PC
579 	  && !REG_P (SET_DEST (x))
580 	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
581 		&& REG_P (SUBREG_REG (SET_DEST (x)))
582 		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
583 		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
584 		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
585 			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
586 	break;
587 
588       return find_single_use_1 (dest, &SET_SRC (x));
589 
590     case MEM:
591     case SUBREG:
592       return find_single_use_1 (dest, &XEXP (x, 0));
593 
594     default:
595       break;
596     }
597 
598   /* If it wasn't one of the common cases above, check each expression and
599      vector of this code.  Look for a unique usage of DEST.  */
600 
601   fmt = GET_RTX_FORMAT (code);
602   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
603     {
604       if (fmt[i] == 'e')
605 	{
606 	  if (dest == XEXP (x, i)
607 	      || (REG_P (dest) && REG_P (XEXP (x, i))
608 		  && REGNO (dest) == REGNO (XEXP (x, i))))
609 	    this_result = loc;
610 	  else
611 	    this_result = find_single_use_1 (dest, &XEXP (x, i));
612 
613 	  if (result == NULL)
614 	    result = this_result;
615 	  else if (this_result)
616 	    /* Duplicate usage.  */
617 	    return NULL;
618 	}
619       else if (fmt[i] == 'E')
620 	{
621 	  int j;
622 
623 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
624 	    {
625 	      if (XVECEXP (x, i, j) == dest
626 		  || (REG_P (dest)
627 		      && REG_P (XVECEXP (x, i, j))
628 		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
629 		this_result = loc;
630 	      else
631 		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
632 
633 	      if (result == NULL)
634 		result = this_result;
635 	      else if (this_result)
636 		return NULL;
637 	    }
638 	}
639     }
640 
641   return result;
642 }
643 
644 
645 /* See if DEST, produced in INSN, is used only a single time in the
646    sequel.  If so, return a pointer to the innermost rtx expression in which
647    it is used.
648 
649    If PLOC is nonzero, *PLOC is set to the insn containing the single use.
650 
651    If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
652    care about REG_DEAD notes or LOG_LINKS.
653 
654    Otherwise, we find the single use by finding an insn that has a
655    LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
656    only referenced once in that insn, we know that it must be the first
657    and last insn referencing DEST.  */
658 
659 static rtx *
660 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
661 {
662   basic_block bb;
663   rtx_insn *next;
664   rtx *result;
665   struct insn_link *link;
666 
667   if (dest == cc0_rtx)
668     {
669       next = NEXT_INSN (insn);
670       if (next == 0
671 	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
672 	return 0;
673 
674       result = find_single_use_1 (dest, &PATTERN (next));
675       if (result && ploc)
676 	*ploc = next;
677       return result;
678     }
679 
680   if (!REG_P (dest))
681     return 0;
682 
683   bb = BLOCK_FOR_INSN (insn);
684   for (next = NEXT_INSN (insn);
685        next && BLOCK_FOR_INSN (next) == bb;
686        next = NEXT_INSN (next))
687     if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
688       {
689 	FOR_EACH_LOG_LINK (link, next)
690 	  if (link->insn == insn && link->regno == REGNO (dest))
691 	    break;
692 
693 	if (link)
694 	  {
695 	    result = find_single_use_1 (dest, &PATTERN (next));
696 	    if (ploc)
697 	      *ploc = next;
698 	    return result;
699 	  }
700       }
701 
702   return 0;
703 }
704 
705 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
706    insn.  The substitution can be undone by undo_all.  If INTO is already
707    set to NEWVAL, do not record this change.  Because computing NEWVAL might
708    also call SUBST, we have to compute it before we put anything into
709    the undo table.  */
710 
711 static void
712 do_SUBST (rtx *into, rtx newval)
713 {
714   struct undo *buf;
715   rtx oldval = *into;
716 
717   if (oldval == newval)
718     return;
719 
720   /* We'd like to catch as many invalid transformations here as
721      possible.  Unfortunately, there are way too many mode changes
722      that are perfectly valid, so we'd waste too much effort for
723      little gain doing the checks here.  Focus on catching invalid
724      transformations involving integer constants.  */
725   if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
726       && CONST_INT_P (newval))
727     {
728       /* Sanity check that we're replacing oldval with a CONST_INT
729 	 that is a valid sign-extension for the original mode.  */
730       gcc_assert (INTVAL (newval)
731 		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
732 
733       /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
734 	 CONST_INT is not valid, because after the replacement, the
735 	 original mode would be gone.  Unfortunately, we can't tell
736 	 when do_SUBST is called to replace the operand thereof, so we
737 	 perform this test on oldval instead, checking whether an
738 	 invalid replacement took place before we got here.  */
739       gcc_assert (!(GET_CODE (oldval) == SUBREG
740 		    && CONST_INT_P (SUBREG_REG (oldval))));
741       gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
742 		    && CONST_INT_P (XEXP (oldval, 0))));
743     }
744 
745   if (undobuf.frees)
746     buf = undobuf.frees, undobuf.frees = buf->next;
747   else
748     buf = XNEW (struct undo);
749 
750   buf->kind = UNDO_RTX;
751   buf->where.r = into;
752   buf->old_contents.r = oldval;
753   *into = newval;
754 
755   buf->next = undobuf.undos, undobuf.undos = buf;
756 }
757 
758 #define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
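
/* As an illustrative sketch (X and NEWSRC stand for whatever rtx the caller
   is working with), a typical use replaces the source of a SET and records
   the change so undo_all can revert it if the combination fails:

	SUBST (SET_SRC (x), newsrc);  */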
759 
760 /* Similar to SUBST, but NEWVAL is an int expression.  Note that using this
761    to substitute for a HOST_WIDE_INT value (including a CONST_INT) is
762    not safe.  */
763 
764 static void
765 do_SUBST_INT (int *into, int newval)
766 {
767   struct undo *buf;
768   int oldval = *into;
769 
770   if (oldval == newval)
771     return;
772 
773   if (undobuf.frees)
774     buf = undobuf.frees, undobuf.frees = buf->next;
775   else
776     buf = XNEW (struct undo);
777 
778   buf->kind = UNDO_INT;
779   buf->where.i = into;
780   buf->old_contents.i = oldval;
781   *into = newval;
782 
783   buf->next = undobuf.undos, undobuf.undos = buf;
784 }
785 
786 #define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
787 
788 /* Similar to SUBST, but just substitute the mode.  This is used when
789    changing the mode of a pseudo-register, so that any other
790    references to the entry in the regno_reg_rtx array will change as
791    well.  */
792 
793 static void
794 do_SUBST_MODE (rtx *into, machine_mode newval)
795 {
796   struct undo *buf;
797   machine_mode oldval = GET_MODE (*into);
798 
799   if (oldval == newval)
800     return;
801 
802   if (undobuf.frees)
803     buf = undobuf.frees, undobuf.frees = buf->next;
804   else
805     buf = XNEW (struct undo);
806 
807   buf->kind = UNDO_MODE;
808   buf->where.r = into;
809   buf->old_contents.m = oldval;
810   adjust_reg_mode (*into, newval);
811 
812   buf->next = undobuf.undos, undobuf.undos = buf;
813 }
814 
815 #define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
816 
817 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */
818 
819 static void
820 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
821 {
822   struct undo *buf;
823   struct insn_link * oldval = *into;
824 
825   if (oldval == newval)
826     return;
827 
828   if (undobuf.frees)
829     buf = undobuf.frees, undobuf.frees = buf->next;
830   else
831     buf = XNEW (struct undo);
832 
833   buf->kind = UNDO_LINKS;
834   buf->where.l = into;
835   buf->old_contents.l = oldval;
836   *into = newval;
837 
838   buf->next = undobuf.undos, undobuf.undos = buf;
839 }
840 
841 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
842 
843 /* Subroutine of try_combine.  Determine whether the replacement patterns
844    NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
845    than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
846    that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
847    undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
848    of all the instructions can be estimated and the replacements are more
849    expensive than the original sequence.  */
850 
851 static bool
852 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
853 		       rtx newpat, rtx newi2pat, rtx newotherpat)
854 {
855   int i0_cost, i1_cost, i2_cost, i3_cost;
856   int new_i2_cost, new_i3_cost;
857   int old_cost, new_cost;
858 
859   /* Lookup the original insn_rtx_costs.  */
860   i2_cost = INSN_COST (i2);
861   i3_cost = INSN_COST (i3);
862 
863   if (i1)
864     {
865       i1_cost = INSN_COST (i1);
866       if (i0)
867 	{
868 	  i0_cost = INSN_COST (i0);
869 	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
870 		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
871 	}
872       else
873 	{
874 	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
875 		      ? i1_cost + i2_cost + i3_cost : 0);
876 	  i0_cost = 0;
877 	}
878     }
879   else
880     {
881       old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
882       i1_cost = i0_cost = 0;
883     }
884 
885   /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
886      correct that.  */
887   if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
888     old_cost -= i1_cost;
889 
890 
891   /* Calculate the replacement insn_rtx_costs.  */
892   new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
893   if (newi2pat)
894     {
895       new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
896       new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
897 		 ? new_i2_cost + new_i3_cost : 0;
898     }
899   else
900     {
901       new_cost = new_i3_cost;
902       new_i2_cost = 0;
903     }
904 
905   if (undobuf.other_insn)
906     {
907       int old_other_cost, new_other_cost;
908 
909       old_other_cost = INSN_COST (undobuf.other_insn);
910       new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
911       if (old_other_cost > 0 && new_other_cost > 0)
912 	{
913 	  old_cost += old_other_cost;
914 	  new_cost += new_other_cost;
915 	}
916       else
917 	old_cost = 0;
918     }
919 
920   /* Disallow this combination if both new_cost and old_cost are greater than
921      zero, and new_cost is greater than old cost.  */
922   int reject = old_cost > 0 && new_cost > old_cost;
923 
924   if (dump_file)
925     {
926       fprintf (dump_file, "%s combination of insns ",
927 	       reject ? "rejecting" : "allowing");
928       if (i0)
929 	fprintf (dump_file, "%d, ", INSN_UID (i0));
930       if (i1 && INSN_UID (i1) != INSN_UID (i2))
931 	fprintf (dump_file, "%d, ", INSN_UID (i1));
932       fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
933 
934       fprintf (dump_file, "original costs ");
935       if (i0)
936 	fprintf (dump_file, "%d + ", i0_cost);
937       if (i1 && INSN_UID (i1) != INSN_UID (i2))
938 	fprintf (dump_file, "%d + ", i1_cost);
939       fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
940 
941       if (newi2pat)
942 	fprintf (dump_file, "replacement costs %d + %d = %d\n",
943 		 new_i2_cost, new_i3_cost, new_cost);
944       else
945 	fprintf (dump_file, "replacement cost %d\n", new_cost);
946     }
947 
948   if (reject)
949     return false;
950 
951   /* Update the uid_insn_cost array with the replacement costs.  */
952   INSN_COST (i2) = new_i2_cost;
953   INSN_COST (i3) = new_i3_cost;
954   if (i1)
955     {
956       INSN_COST (i1) = 0;
957       if (i0)
958 	INSN_COST (i0) = 0;
959     }
960 
961   return true;
962 }
963 
964 
965 /* Delete any insns that copy a register to itself.
966    Return true if the CFG was changed.  */
967 
968 static bool
969 delete_noop_moves (void)
970 {
971   rtx_insn *insn, *next;
972   basic_block bb;
973 
974   bool edges_deleted = false;
975 
976   FOR_EACH_BB_FN (bb, cfun)
977     {
978       for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
979 	{
980 	  next = NEXT_INSN (insn);
981 	  if (INSN_P (insn) && noop_move_p (insn))
982 	    {
983 	      if (dump_file)
984 		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
985 
986 	      edges_deleted |= delete_insn_and_edges (insn);
987 	    }
988 	}
989     }
990 
991   return edges_deleted;
992 }
993 
994 
995 /* Return false if we do not want to (or cannot) combine DEF.  */
996 static bool
997 can_combine_def_p (df_ref def)
998 {
999   /* Do not consider the def if it is a pre/post modification in a MEM.  */
1000   if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1001     return false;
1002 
1003   unsigned int regno = DF_REF_REGNO (def);
1004 
1005   /* Do not combine frame pointer adjustments.  */
1006   if ((regno == FRAME_POINTER_REGNUM
1007        && (!reload_completed || frame_pointer_needed))
1008       || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1009 	  && regno == HARD_FRAME_POINTER_REGNUM
1010 	  && (!reload_completed || frame_pointer_needed))
1011       || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1012 	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1013     return false;
1014 
1015   return true;
1016 }
1017 
1018 /* Return false if we do not want to (or cannot) combine USE.  */
1019 static bool
1020 can_combine_use_p (df_ref use)
1021 {
1022   /* Do not consider the use of the stack pointer by a function call.  */
1023   if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1024     return false;
1025 
1026   return true;
1027 }
1028 
1029 /* Fill in log links field for all insns.  */
1030 
1031 static void
1032 create_log_links (void)
1033 {
1034   basic_block bb;
1035   rtx_insn **next_use;
1036   rtx_insn *insn;
1037   df_ref def, use;
1038 
1039   next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1040 
1041   /* Pass through each block from the end, recording the uses of each
1042      register and establishing log links when def is encountered.
1043      Note that we do not clear next_use array in order to save time,
1044      so we have to test whether the use is in the same basic block as def.
1045 
1046      There are a few cases below when we do not consider the definition or
1047      usage -- these are taken from what the original flow.c did.  Don't ask me
1048      why it is done this way; I don't know and if it works, I don't want to know.  */
1049 
1050   FOR_EACH_BB_FN (bb, cfun)
1051     {
1052       FOR_BB_INSNS_REVERSE (bb, insn)
1053         {
1054           if (!NONDEBUG_INSN_P (insn))
1055             continue;
1056 
1057 	  /* Log links are created only once.  */
1058 	  gcc_assert (!LOG_LINKS (insn));
1059 
1060 	  FOR_EACH_INSN_DEF (def, insn)
1061             {
1062               unsigned int regno = DF_REF_REGNO (def);
1063               rtx_insn *use_insn;
1064 
1065               if (!next_use[regno])
1066                 continue;
1067 
1068 	      if (!can_combine_def_p (def))
1069 		continue;
1070 
1071 	      use_insn = next_use[regno];
1072 	      next_use[regno] = NULL;
1073 
1074 	      if (BLOCK_FOR_INSN (use_insn) != bb)
1075 		continue;
1076 
1077 	      /* flow.c claimed:
1078 
1079 		 We don't build a LOG_LINK for hard registers contained
1080 		 in ASM_OPERANDs.  If these registers get replaced,
1081 		 we might wind up changing the semantics of the insn,
1082 		 even if reload can make what appear to be valid
1083 		 assignments later.  */
1084 	      if (regno < FIRST_PSEUDO_REGISTER
1085 		  && asm_noperands (PATTERN (use_insn)) >= 0)
1086 		continue;
1087 
1088 	      /* Don't add duplicate links between instructions.  */
1089 	      struct insn_link *links;
1090 	      FOR_EACH_LOG_LINK (links, use_insn)
1091 	        if (insn == links->insn && regno == links->regno)
1092 		  break;
1093 
1094 	      if (!links)
1095 		LOG_LINKS (use_insn)
1096 		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1097             }
1098 
1099 	  FOR_EACH_INSN_USE (use, insn)
1100 	    if (can_combine_use_p (use))
1101 	      next_use[DF_REF_REGNO (use)] = insn;
1102         }
1103     }
1104 
1105   free (next_use);
1106 }
1107 
1108 /* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
1109    true if we found a LOG_LINK that proves that A feeds B.  This only works
1110    if there are no instructions between A and B which could have a link
1111    depending on A, since in that case we would not record a link for B.
1112    We also check the implicit dependency created by a cc0 setter/user
1113    pair.  */
1114 
1115 static bool
1116 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1117 {
1118   struct insn_link *links;
1119   FOR_EACH_LOG_LINK (links, b)
1120     if (links->insn == a)
1121       return true;
1122   if (HAVE_cc0 && sets_cc0_p (a))
1123     return true;
1124   return false;
1125 }
1126 
1127 /* Main entry point for combiner.  F is the first insn of the function.
1128    NREGS is the first unused pseudo-reg number.
1129 
1130    Return nonzero if the CFG was changed (e.g. if the combiner has
1131    turned an indirect jump instruction into a direct jump).  */
1132 static int
1133 combine_instructions (rtx_insn *f, unsigned int nregs)
1134 {
1135   rtx_insn *insn, *next;
1136   rtx_insn *prev;
1137   struct insn_link *links, *nextlinks;
1138   rtx_insn *first;
1139   basic_block last_bb;
1140 
1141   int new_direct_jump_p = 0;
1142 
1143   for (first = f; first && !NONDEBUG_INSN_P (first); )
1144     first = NEXT_INSN (first);
1145   if (!first)
1146     return 0;
1147 
1148   combine_attempts = 0;
1149   combine_merges = 0;
1150   combine_extras = 0;
1151   combine_successes = 0;
1152 
1153   rtl_hooks = combine_rtl_hooks;
1154 
1155   reg_stat.safe_grow_cleared (nregs);
1156 
1157   init_recog_no_volatile ();
1158 
1159   /* Allocate array for insn info.  */
1160   max_uid_known = get_max_uid ();
1161   uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1162   uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1163   gcc_obstack_init (&insn_link_obstack);
1164 
1165   nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1166 
1167   /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
1168      problems when, for example, we have j <<= 1 in a loop.  */
1169 
1170   nonzero_sign_valid = 0;
1171   label_tick = label_tick_ebb_start = 1;
1172 
1173   /* Scan all SETs and see if we can deduce anything about what
1174      bits are known to be zero for some registers and how many copies
1175      of the sign bit are known to exist for those registers.
1176 
1177      Also set any known values so that we can use it while searching
1178      for what bits are known to be set.  */
1179 
1180   setup_incoming_promotions (first);
1181   /* Allow the entry block and the first block to fall into the same EBB.
1182      Conceptually the incoming promotions are assigned to the entry block.  */
1183   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1184 
1185   create_log_links ();
1186   FOR_EACH_BB_FN (this_basic_block, cfun)
1187     {
1188       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1189       last_call_luid = 0;
1190       mem_last_set = -1;
1191 
1192       label_tick++;
1193       if (!single_pred_p (this_basic_block)
1194 	  || single_pred (this_basic_block) != last_bb)
1195 	label_tick_ebb_start = label_tick;
1196       last_bb = this_basic_block;
1197 
1198       FOR_BB_INSNS (this_basic_block, insn)
1199         if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1200 	  {
1201             rtx links;
1202 
1203             subst_low_luid = DF_INSN_LUID (insn);
1204             subst_insn = insn;
1205 
1206 	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1207 		         insn);
1208 	    record_dead_and_set_regs (insn);
1209 
1210 	    if (AUTO_INC_DEC)
1211 	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1212 		if (REG_NOTE_KIND (links) == REG_INC)
1213 		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1214 						    insn);
1215 
1216 	    /* Record the current insn_rtx_cost of this instruction.  */
1217 	    if (NONJUMP_INSN_P (insn))
1218 	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1219 	      					optimize_this_for_speed_p);
1220 	    if (dump_file)
1221 	      fprintf (dump_file, "insn_cost %d: %d\n",
1222 		       INSN_UID (insn), INSN_COST (insn));
1223 	  }
1224     }
1225 
1226   nonzero_sign_valid = 1;
1227 
1228   /* Now scan all the insns in forward order.  */
1229   label_tick = label_tick_ebb_start = 1;
1230   init_reg_last ();
1231   setup_incoming_promotions (first);
1232   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1233   int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1234 
1235   FOR_EACH_BB_FN (this_basic_block, cfun)
1236     {
1237       rtx_insn *last_combined_insn = NULL;
1238       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1239       last_call_luid = 0;
1240       mem_last_set = -1;
1241 
1242       label_tick++;
1243       if (!single_pred_p (this_basic_block)
1244 	  || single_pred (this_basic_block) != last_bb)
1245 	label_tick_ebb_start = label_tick;
1246       last_bb = this_basic_block;
1247 
1248       rtl_profile_for_bb (this_basic_block);
1249       for (insn = BB_HEAD (this_basic_block);
1250 	   insn != NEXT_INSN (BB_END (this_basic_block));
1251 	   insn = next ? next : NEXT_INSN (insn))
1252 	{
1253 	  next = 0;
1254 	  if (!NONDEBUG_INSN_P (insn))
1255 	    continue;
1256 
1257 	  while (last_combined_insn
1258 		 && (!NONDEBUG_INSN_P (last_combined_insn)
1259 		     || last_combined_insn->deleted ()))
1260 	    last_combined_insn = PREV_INSN (last_combined_insn);
1261 	  if (last_combined_insn == NULL_RTX
1262 	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1263 	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1264 	    last_combined_insn = insn;
1265 
1266 	  /* See if we know about function return values before this
1267 	     insn based upon SUBREG flags.  */
1268 	  check_promoted_subreg (insn, PATTERN (insn));
1269 
1270 	  /* See if we can find hard regs and subregs of pseudos in
1271 	     narrower modes.  This could help turn TRUNCATEs
1272 	     into SUBREGs.  */
1273 	  note_uses (&PATTERN (insn), record_truncated_values, NULL);
1274 
1275 	  /* Try this insn with each insn it links back to.  */
1276 
1277 	  FOR_EACH_LOG_LINK (links, insn)
1278 	    if ((next = try_combine (insn, links->insn, NULL,
1279 				     NULL, &new_direct_jump_p,
1280 				     last_combined_insn)) != 0)
1281 	      {
1282 		statistics_counter_event (cfun, "two-insn combine", 1);
1283 		goto retry;
1284 	      }
1285 
1286 	  /* Try each sequence of three linked insns ending with this one.  */
1287 
1288 	  if (max_combine >= 3)
1289 	    FOR_EACH_LOG_LINK (links, insn)
1290 	      {
1291 		rtx_insn *link = links->insn;
1292 
1293 		/* If the linked insn has been replaced by a note, then there
1294 		   is no point in pursuing this chain any further.  */
1295 		if (NOTE_P (link))
1296 		  continue;
1297 
1298 		FOR_EACH_LOG_LINK (nextlinks, link)
1299 		  if ((next = try_combine (insn, link, nextlinks->insn,
1300 					   NULL, &new_direct_jump_p,
1301 					   last_combined_insn)) != 0)
1302 		    {
1303 		      statistics_counter_event (cfun, "three-insn combine", 1);
1304 		      goto retry;
1305 		    }
1306 	      }
1307 
1308 	  /* Try to combine a jump insn that uses CC0
1309 	     with a preceding insn that sets CC0, and maybe with its
1310 	     logical predecessor as well.
1311 	     This is how we make decrement-and-branch insns.
1312 	     We need this special code because data flow connections
1313 	     via CC0 do not get entered in LOG_LINKS.  */
1314 
1315 	  if (HAVE_cc0
1316 	      && JUMP_P (insn)
1317 	      && (prev = prev_nonnote_insn (insn)) != 0
1318 	      && NONJUMP_INSN_P (prev)
1319 	      && sets_cc0_p (PATTERN (prev)))
1320 	    {
1321 	      if ((next = try_combine (insn, prev, NULL, NULL,
1322 				       &new_direct_jump_p,
1323 				       last_combined_insn)) != 0)
1324 		goto retry;
1325 
1326 	      FOR_EACH_LOG_LINK (nextlinks, prev)
1327 		  if ((next = try_combine (insn, prev, nextlinks->insn,
1328 					   NULL, &new_direct_jump_p,
1329 					   last_combined_insn)) != 0)
1330 		    goto retry;
1331 	    }
1332 
1333 	  /* Do the same for an insn that explicitly references CC0.  */
1334 	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1335 	      && (prev = prev_nonnote_insn (insn)) != 0
1336 	      && NONJUMP_INSN_P (prev)
1337 	      && sets_cc0_p (PATTERN (prev))
1338 	      && GET_CODE (PATTERN (insn)) == SET
1339 	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1340 	    {
1341 	      if ((next = try_combine (insn, prev, NULL, NULL,
1342 				       &new_direct_jump_p,
1343 				       last_combined_insn)) != 0)
1344 		goto retry;
1345 
1346 	      FOR_EACH_LOG_LINK (nextlinks, prev)
1347 		  if ((next = try_combine (insn, prev, nextlinks->insn,
1348 					   NULL, &new_direct_jump_p,
1349 					   last_combined_insn)) != 0)
1350 		    goto retry;
1351 	    }
1352 
1353 	  /* Finally, see if any of the insns that this insn links to
1354 	     explicitly references CC0.  If so, try this insn, that insn,
1355 	     and its predecessor if it sets CC0.  */
1356 	  if (HAVE_cc0)
1357 	    {
1358 	      FOR_EACH_LOG_LINK (links, insn)
1359 		if (NONJUMP_INSN_P (links->insn)
1360 		    && GET_CODE (PATTERN (links->insn)) == SET
1361 		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1362 		    && (prev = prev_nonnote_insn (links->insn)) != 0
1363 		    && NONJUMP_INSN_P (prev)
1364 		    && sets_cc0_p (PATTERN (prev))
1365 		    && (next = try_combine (insn, links->insn,
1366 					    prev, NULL, &new_direct_jump_p,
1367 					    last_combined_insn)) != 0)
1368 		  goto retry;
1369 	    }
1370 
1371 	  /* Try combining an insn with two different insns whose results it
1372 	     uses.  */
1373 	  if (max_combine >= 3)
1374 	    FOR_EACH_LOG_LINK (links, insn)
1375 	      for (nextlinks = links->next; nextlinks;
1376 		   nextlinks = nextlinks->next)
1377 		if ((next = try_combine (insn, links->insn,
1378 					 nextlinks->insn, NULL,
1379 					 &new_direct_jump_p,
1380 					 last_combined_insn)) != 0)
1381 
1382 		  {
1383 		    statistics_counter_event (cfun, "three-insn combine", 1);
1384 		    goto retry;
1385 		  }
1386 
1387 	  /* Try four-instruction combinations.  */
1388 	  if (max_combine >= 4)
1389 	    FOR_EACH_LOG_LINK (links, insn)
1390 	      {
1391 		struct insn_link *next1;
1392 		rtx_insn *link = links->insn;
1393 
1394 		/* If the linked insn has been replaced by a note, then there
1395 		   is no point in pursuing this chain any further.  */
1396 		if (NOTE_P (link))
1397 		  continue;
1398 
1399 		FOR_EACH_LOG_LINK (next1, link)
1400 		  {
1401 		    rtx_insn *link1 = next1->insn;
1402 		    if (NOTE_P (link1))
1403 		      continue;
1404 		    /* I0 -> I1 -> I2 -> I3.  */
1405 		    FOR_EACH_LOG_LINK (nextlinks, link1)
1406 		      if ((next = try_combine (insn, link, link1,
1407 					       nextlinks->insn,
1408 					       &new_direct_jump_p,
1409 					       last_combined_insn)) != 0)
1410 			{
1411 			  statistics_counter_event (cfun, "four-insn combine", 1);
1412 			  goto retry;
1413 			}
1414 		    /* I0, I1 -> I2, I2 -> I3.  */
1415 		    for (nextlinks = next1->next; nextlinks;
1416 			 nextlinks = nextlinks->next)
1417 		      if ((next = try_combine (insn, link, link1,
1418 					       nextlinks->insn,
1419 					       &new_direct_jump_p,
1420 					       last_combined_insn)) != 0)
1421 			{
1422 			  statistics_counter_event (cfun, "four-insn combine", 1);
1423 			  goto retry;
1424 			}
1425 		  }
1426 
1427 		for (next1 = links->next; next1; next1 = next1->next)
1428 		  {
1429 		    rtx_insn *link1 = next1->insn;
1430 		    if (NOTE_P (link1))
1431 		      continue;
1432 		    /* I0 -> I2; I1, I2 -> I3.  */
1433 		    FOR_EACH_LOG_LINK (nextlinks, link)
1434 		      if ((next = try_combine (insn, link, link1,
1435 					       nextlinks->insn,
1436 					       &new_direct_jump_p,
1437 					       last_combined_insn)) != 0)
1438 			{
1439 			  statistics_counter_event (cfun, "four-insn combine", 1);
1440 			  goto retry;
1441 			}
1442 		    /* I0 -> I1; I1, I2 -> I3.  */
1443 		    FOR_EACH_LOG_LINK (nextlinks, link1)
1444 		      if ((next = try_combine (insn, link, link1,
1445 					       nextlinks->insn,
1446 					       &new_direct_jump_p,
1447 					       last_combined_insn)) != 0)
1448 			{
1449 			  statistics_counter_event (cfun, "four-insn combine", 1);
1450 			  goto retry;
1451 			}
1452 		  }
1453 	      }
1454 
1455 	  /* Try this insn with each REG_EQUAL note it links back to.  */
1456 	  FOR_EACH_LOG_LINK (links, insn)
1457 	    {
1458 	      rtx set, note;
1459 	      rtx_insn *temp = links->insn;
1460 	      if ((set = single_set (temp)) != 0
1461 		  && (note = find_reg_equal_equiv_note (temp)) != 0
1462 		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1463 		  /* Avoid using a register that may already have been marked
1464 		     dead by an earlier instruction.  */
1465 		  && ! unmentioned_reg_p (note, SET_SRC (set))
1466 		  && (GET_MODE (note) == VOIDmode
1467 		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1468 		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1469 			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1470 			     || (GET_MODE (XEXP (SET_DEST (set), 0))
1471 				 == GET_MODE (note))))))
1472 		{
1473 		  /* Temporarily replace the set's source with the
1474 		     contents of the REG_EQUAL note.  The insn will
1475 		     be deleted or recognized by try_combine.  */
1476 		  rtx orig_src = SET_SRC (set);
1477 		  rtx orig_dest = SET_DEST (set);
1478 		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1479 		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
1480 		  SET_SRC (set) = note;
1481 		  i2mod = temp;
1482 		  i2mod_old_rhs = copy_rtx (orig_src);
1483 		  i2mod_new_rhs = copy_rtx (note);
1484 		  next = try_combine (insn, i2mod, NULL, NULL,
1485 				      &new_direct_jump_p,
1486 				      last_combined_insn);
1487 		  i2mod = NULL;
1488 		  if (next)
1489 		    {
1490 		      statistics_counter_event (cfun, "insn-with-note combine", 1);
1491 		      goto retry;
1492 		    }
1493 		  SET_SRC (set) = orig_src;
1494 		  SET_DEST (set) = orig_dest;
1495 		}
1496 	    }
1497 
1498 	  if (!NOTE_P (insn))
1499 	    record_dead_and_set_regs (insn);
1500 
1501 retry:
1502 	  ;
1503 	}
1504     }
1505 
1506   default_rtl_profile ();
1507   clear_bb_flags ();
1508   new_direct_jump_p |= purge_all_dead_edges ();
1509   new_direct_jump_p |= delete_noop_moves ();
1510 
1511   /* Clean up.  */
1512   obstack_free (&insn_link_obstack, NULL);
1513   free (uid_log_links);
1514   free (uid_insn_cost);
1515   reg_stat.release ();
1516 
1517   {
1518     struct undo *undo, *next;
1519     for (undo = undobuf.frees; undo; undo = next)
1520       {
1521 	next = undo->next;
1522 	free (undo);
1523       }
1524     undobuf.frees = 0;
1525   }
1526 
1527   total_attempts += combine_attempts;
1528   total_merges += combine_merges;
1529   total_extras += combine_extras;
1530   total_successes += combine_successes;
1531 
1532   nonzero_sign_valid = 0;
1533   rtl_hooks = general_rtl_hooks;
1534 
1535   /* Make recognizer allow volatile MEMs again.  */
1536   init_recog ();
1537 
1538   return new_direct_jump_p;
1539 }
1540 
1541 /* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */
1542 
1543 static void
1544 init_reg_last (void)
1545 {
1546   unsigned int i;
1547   reg_stat_type *p;
1548 
1549   FOR_EACH_VEC_ELT (reg_stat, i, p)
1550     memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1551 }
1552 
1553 /* Set up any promoted values for incoming argument registers.  */
1554 
1555 static void
1556 setup_incoming_promotions (rtx_insn *first)
1557 {
1558   tree arg;
1559   bool strictly_local = false;
1560 
1561   for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1562        arg = DECL_CHAIN (arg))
1563     {
1564       rtx x, reg = DECL_INCOMING_RTL (arg);
1565       int uns1, uns3;
1566       machine_mode mode1, mode2, mode3, mode4;
1567 
1568       /* Only continue if the incoming argument is in a register.  */
1569       if (!REG_P (reg))
1570 	continue;
1571 
1572       /* Determine, if possible, whether all call sites of the current
1573          function lie within the current compilation unit.  (This does
1574 	 take into account the exporting of a function via taking its
1575 	 address, and so forth.)  */
1576       strictly_local = cgraph_node::local_info (current_function_decl)->local;
1577 
1578       /* The mode and signedness of the argument before any promotions happen
1579          (equal to the mode of the pseudo holding it at that stage).  */
1580       mode1 = TYPE_MODE (TREE_TYPE (arg));
1581       uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1582 
1583       /* The mode and signedness of the argument after any source language and
1584          TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
1585       mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1586       uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1587 
1588       /* The mode and signedness of the argument as it is actually passed,
1589          see assign_parm_setup_reg in function.c.  */
1590       mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1591 				     TREE_TYPE (cfun->decl), 0);
1592 
1593       /* The mode of the register in which the argument is being passed.  */
1594       mode4 = GET_MODE (reg);
1595 
1596       /* Eliminate sign extensions in the callee when:
1597 	 (a) A mode promotion has occurred;  */
1598       if (mode1 == mode3)
1599 	continue;
1600       /* (b) The mode of the register is the same as the mode of
1601 	     the argument as it is passed; */
1602       if (mode3 != mode4)
1603 	continue;
1604       /* (c) There's no language level extension;  */
1605       if (mode1 == mode2)
1606 	;
1607       /* (c.1) All callers are from the current compilation unit.  If that's
1608 	 the case we don't have to rely on an ABI, we only have to know
1609 	 what we're generating right now, and we know that we will do the
1610 	 mode1 to mode2 promotion with the given sign.  */
1611       else if (!strictly_local)
1612 	continue;
1613       /* (c.2) The combination of the two promotions is useful.  This is
1614 	 true when the signs match, or if the first promotion is unsigned.
1615 		 In the latter case, (sign_extend (zero_extend x)) is the same as
1616 	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
1617       else if (uns1)
1618 	uns3 = true;
1619       else if (uns3)
1620 	continue;
1621 
1622       /* Record that the value was promoted from mode1 to mode3,
1623 	 so that any sign extension at the head of the current
1624 	 function may be eliminated.  */
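      /* For example, assuming a QImode argument promoted to an unsigned
	 SImode value, the expression recorded below would be
	 (zero_extend:SI (clobber:QI (const_int 0))).  */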
1625       x = gen_rtx_CLOBBER (mode1, const0_rtx);
1626       x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1627       record_value_for_reg (reg, first, x);
1628     }
1629 }
1630 
1631 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1632    that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1633    because some machines (maybe most) will actually do the sign-extension and
1634    this is the conservative approach.
1635 
1636    ??? For 2.5, try to tighten up the MD files in this regard instead of this
1637    kludge.  */
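/* As an illustration (values purely hypothetical): with a 16-bit MODE and a
   PREC of 32, the constant 0x8000 is positive as a host integer but has the
   MODE sign bit set, so it is rewritten as the sign-extended ...ffff8000.  */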
1638 
1639 static rtx
1640 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1641 {
1642   if (GET_MODE_PRECISION (mode) < prec
1643       && CONST_INT_P (src)
1644       && INTVAL (src) > 0
1645       && val_signbit_known_set_p (mode, INTVAL (src)))
1646     src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (mode));
1647 
1648   return src;
1649 }
1650 
1651 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1652    and SET.  */
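/* Both SET_SRC and the REG_EQUAL value describe the contents of X, so either
   may be used to refine what we record; below we keep whichever estimate of
   the nonzero bits and sign-bit copies is tighter.  */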
1653 
1654 static void
1655 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1656 			   rtx x)
1657 {
1658   rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1659   unsigned HOST_WIDE_INT bits = 0;
1660   rtx reg_equal = NULL, src = SET_SRC (set);
1661   unsigned int num = 0;
1662 
1663   if (reg_equal_note)
1664     reg_equal = XEXP (reg_equal_note, 0);
1665 
1666   if (SHORT_IMMEDIATES_SIGN_EXTEND)
1667     {
1668       src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1669       if (reg_equal)
1670 	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1671     }
1672 
1673   /* Don't call nonzero_bits if it cannot change anything.  */
1674   if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1675     {
1676       bits = nonzero_bits (src, nonzero_bits_mode);
1677       if (reg_equal && bits)
1678 	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1679       rsp->nonzero_bits |= bits;
1680     }
1681 
1682   /* Don't call num_sign_bit_copies if it cannot change anything.  */
1683   if (rsp->sign_bit_copies != 1)
1684     {
1685       num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1686       if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1687 	{
1688 	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1689 	  if (num == 0 || numeq > num)
1690 	    num = numeq;
1691 	}
1692       if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1693 	rsp->sign_bit_copies = num;
1694     }
1695 }
1696 
1697 /* Called via note_stores.  If X is a pseudo that is narrower than
1698    HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1699 
1700    If we are setting only a portion of X and we can't figure out what
1701    portion, assume all bits will be used since we don't know what will
1702    be happening.
1703 
1704    Similarly, set how many bits of X are known to be copies of the sign bit
1705    at all locations in the function.  This is the smallest number implied
1706    by any set of X.  */
1707 
1708 static void
1709 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1710 {
1711   rtx_insn *insn = (rtx_insn *) data;
1712 
1713   if (REG_P (x)
1714       && REGNO (x) >= FIRST_PSEUDO_REGISTER
1715       /* If this register is undefined at the start of the function, we can't
1716 	 say what its contents were.  */
1717       && ! REGNO_REG_SET_P
1718 	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1719       && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1720     {
1721       reg_stat_type *rsp = &reg_stat[REGNO (x)];
1722 
1723       if (set == 0 || GET_CODE (set) == CLOBBER)
1724 	{
1725 	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1726 	  rsp->sign_bit_copies = 1;
1727 	  return;
1728 	}
1729 
1730       /* If this register is being initialized using itself, and the
1731 	 register is uninitialized in this basic block, and there are
1732 	 no LOG_LINKS which set the register, then part of the
1733 	 register is uninitialized.  In that case we can't assume
1734 	 anything about the number of nonzero bits.
1735 
1736 	 ??? We could do better if we checked this in
1737 	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
1738 	 could avoid making assumptions about the insn which initially
1739 	 sets the register, while still using the information in other
1740 	 insns.  We would have to be careful to check every insn
1741 	 involved in the combination.  */
1742 
1743       if (insn
1744 	  && reg_referenced_p (x, PATTERN (insn))
1745 	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1746 			       REGNO (x)))
1747 	{
1748 	  struct insn_link *link;
1749 
1750 	  FOR_EACH_LOG_LINK (link, insn)
1751 	    if (dead_or_set_p (link->insn, x))
1752 	      break;
1753 	  if (!link)
1754 	    {
1755 	      rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1756 	      rsp->sign_bit_copies = 1;
1757 	      return;
1758 	    }
1759 	}
1760 
1761       /* If this is a complex assignment, see if we can convert it into a
1762 	 simple assignment.  */
1763       set = expand_field_assignment (set);
1764 
1765       /* If this is a simple assignment, or we have a paradoxical SUBREG,
1766 	 set what we know about X.  */
1767 
1768       if (SET_DEST (set) == x
1769 	  || (paradoxical_subreg_p (SET_DEST (set))
1770 	      && SUBREG_REG (SET_DEST (set)) == x))
1771 	update_rsp_from_reg_equal (rsp, insn, set, x);
1772       else
1773 	{
1774 	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1775 	  rsp->sign_bit_copies = 1;
1776 	}
1777     }
1778 }
1779 
1780 /* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
1781    optionally insns that were previously combined into I3 or that will be
1782    combined into the merger of INSN and I3.  The order is PRED, PRED2,
1783    INSN, SUCC, SUCC2, I3.
1784 
1785    Return 0 if the combination is not allowed for any reason.
1786 
1787    If the combination is allowed, *PDEST will be set to the single
1788    destination of INSN and *PSRC to the single source, and this function
1789    will return 1.  */
1790 
1791 static int
1792 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1793 	       rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1794 	       rtx *pdest, rtx *psrc)
1795 {
1796   int i;
1797   const_rtx set = 0;
1798   rtx src, dest;
1799   rtx_insn *p;
1800   rtx link;
1801   bool all_adjacent = true;
1802   int (*is_volatile_p) (const_rtx);
1803 
1804   if (succ)
1805     {
1806       if (succ2)
1807 	{
1808 	  if (next_active_insn (succ2) != i3)
1809 	    all_adjacent = false;
1810 	  if (next_active_insn (succ) != succ2)
1811 	    all_adjacent = false;
1812 	}
1813       else if (next_active_insn (succ) != i3)
1814 	all_adjacent = false;
1815       if (next_active_insn (insn) != succ)
1816 	all_adjacent = false;
1817     }
1818   else if (next_active_insn (insn) != i3)
1819     all_adjacent = false;
1820 
1821   /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1822      or a PARALLEL consisting of such a SET and CLOBBERs.
1823 
1824      If INSN has CLOBBER parallel parts, ignore them for our processing.
1825      By definition, these happen during the execution of the insn.  When it
1826      is merged with another insn, all bets are off.  If they are, in fact,
1827      needed and aren't also supplied in I3, they may be added by
1828      recog_for_combine.  Otherwise, it won't match.
1829 
1830      We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1831      note.
1832 
1833      Get the source and destination of INSN.  If more than one, can't
1834      combine.  */
1835 
1836   if (GET_CODE (PATTERN (insn)) == SET)
1837     set = PATTERN (insn);
1838   else if (GET_CODE (PATTERN (insn)) == PARALLEL
1839 	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1840     {
1841       for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1842 	{
1843 	  rtx elt = XVECEXP (PATTERN (insn), 0, i);
1844 
1845 	  switch (GET_CODE (elt))
1846 	    {
1847 	    /* This is important to combine floating point insns
1848 	       for the SH4 port.  */
1849 	    case USE:
1850 	      /* Combining an isolated USE doesn't make sense.
1851 		 We depend here on combinable_i3pat to reject them.  */
1852 	      /* The code below this loop only verifies that the inputs of
1853 		 the SET in INSN do not change.  We call reg_set_between_p
1854 		 to verify that the REG in the USE does not change between
1855 		 I3 and INSN.
1856 		 If the USE in INSN was for a pseudo register, the matching
1857 		 insn pattern will likely match any register; combining this
1858 		 with any other USE would only be safe if we knew that the
1859 		 used registers have identical values, or if there was
1860 		 something to tell them apart, e.g. different modes.  For
1861 		 now, we forgo such complicated tests and simply disallow
1862 		 combining of USES of pseudo registers with any other USE.  */
1863 	      if (REG_P (XEXP (elt, 0))
1864 		  && GET_CODE (PATTERN (i3)) == PARALLEL)
1865 		{
1866 		  rtx i3pat = PATTERN (i3);
1867 		  int i = XVECLEN (i3pat, 0) - 1;
1868 		  unsigned int regno = REGNO (XEXP (elt, 0));
1869 
1870 		  do
1871 		    {
1872 		      rtx i3elt = XVECEXP (i3pat, 0, i);
1873 
1874 		      if (GET_CODE (i3elt) == USE
1875 			  && REG_P (XEXP (i3elt, 0))
1876 			  && (REGNO (XEXP (i3elt, 0)) == regno
1877 			      ? reg_set_between_p (XEXP (elt, 0),
1878 						   PREV_INSN (insn), i3)
1879 			      : regno >= FIRST_PSEUDO_REGISTER))
1880 			return 0;
1881 		    }
1882 		  while (--i >= 0);
1883 		}
1884 	      break;
1885 
1886 	      /* We can ignore CLOBBERs.  */
1887 	    case CLOBBER:
1888 	      break;
1889 
1890 	    case SET:
1891 	      /* Ignore SETs whose result isn't used but not those that
1892 		 have side-effects.  */
1893 	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1894 		  && insn_nothrow_p (insn)
1895 		  && !side_effects_p (elt))
1896 		break;
1897 
1898 	      /* If we have already found a SET, this is a second one and
1899 		 so we cannot combine with this insn.  */
1900 	      if (set)
1901 		return 0;
1902 
1903 	      set = elt;
1904 	      break;
1905 
1906 	    default:
1907 	      /* Anything else means we can't combine.  */
1908 	      return 0;
1909 	    }
1910 	}
1911 
1912       if (set == 0
1913 	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1914 	     so don't do anything with it.  */
1915 	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1916 	return 0;
1917     }
1918   else
1919     return 0;
1920 
1921   if (set == 0)
1922     return 0;
1923 
1924   /* The simplification in expand_field_assignment may call back to
1925      get_last_value, so set a safeguard here.  */
1926   subst_low_luid = DF_INSN_LUID (insn);
1927 
1928   set = expand_field_assignment (set);
1929   src = SET_SRC (set), dest = SET_DEST (set);
1930 
1931   /* Do not eliminate a user-specified register if it is in an
1932      asm input, because we may break the register asm usage defined
1933      in the GCC manual if we allow doing so.
1934      Be aware that this may cover more cases than we expect, but this
1935      should be harmless.  */
1936   if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1937       && extract_asm_operands (PATTERN (i3)))
1938     return 0;
1939 
1940   /* Don't eliminate a store in the stack pointer.  */
1941   if (dest == stack_pointer_rtx
1942       /* Don't combine with an insn that sets a register to itself if it has
1943 	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
1944       || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1945       /* Can't merge an ASM_OPERANDS.  */
1946       || GET_CODE (src) == ASM_OPERANDS
1947       /* Can't merge a function call.  */
1948       || GET_CODE (src) == CALL
1949       /* Don't eliminate a function call argument.  */
1950       || (CALL_P (i3)
1951 	  && (find_reg_fusage (i3, USE, dest)
1952 	      || (REG_P (dest)
1953 		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
1954 		  && global_regs[REGNO (dest)])))
1955       /* Don't substitute into an incremented register.  */
1956       || FIND_REG_INC_NOTE (i3, dest)
1957       || (succ && FIND_REG_INC_NOTE (succ, dest))
1958       || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1959       /* Don't substitute into a non-local goto, this confuses CFG.  */
1960       || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1961       /* Make sure that DEST is not used after INSN but before SUCC, or
1962 	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
1963       || (!all_adjacent
1964 	  && ((succ2
1965 	       && (reg_used_between_p (dest, succ2, i3)
1966 		   || reg_used_between_p (dest, succ, succ2)))
1967 	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1968 	      || (succ
1969 		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1970 		     that case SUCC is not in the insn stream, so use SUCC2
1971 		     instead for this test.  */
1972 		  && reg_used_between_p (dest, insn,
1973 					 succ2
1974 					 && INSN_UID (succ) == INSN_UID (succ2)
1975 					 ? succ2 : succ))))
1976       /* Make sure that the value that is to be substituted for the register
1977 	 does not use any registers whose values alter in between.  However,
1978 	 if the insns are adjacent, a use can't cross a set even though we
1979 	 think it might (this can happen for a sequence of insns each setting
1980 	 the same destination; last_set of that register might point to
1981 	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
1982 	 equivalent to the memory so the substitution is valid even if there
1983 	 are intervening stores.  Also, don't move a volatile asm or
1984 	 UNSPEC_VOLATILE across any other insns.  */
1985       || (! all_adjacent
1986 	  && (((!MEM_P (src)
1987 		|| ! find_reg_note (insn, REG_EQUIV, src))
1988 	       && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1989 	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1990 	      || GET_CODE (src) == UNSPEC_VOLATILE))
1991       /* Don't combine across a CALL_INSN, because that would possibly
1992 	 change whether the life span of some REGs crosses calls or not,
1993 	 and it is a pain to update that information.
1994 	 Exception: if source is a constant, moving it later can't hurt.
1995 	 Accept that as a special case.  */
1996       || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1997     return 0;
1998 
1999   /* DEST must either be a REG or CC0.  */
2000   if (REG_P (dest))
2001     {
2002       /* If register alignment is being enforced for multi-word items in all
2003 	 cases except for parameters, it is possible to have a register copy
2004 	 insn referencing a hard register that is not allowed to contain the
2005 	 mode being copied and which would not be valid as an operand of most
2006 	 insns.  Eliminate this problem by not combining with such an insn.
2007 
2008 	 Also, on some machines we don't want to extend the life of a hard
2009 	 register.  */
2010 
2011       if (REG_P (src)
2012 	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2013 	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
2014 	      /* Don't extend the life of a hard register unless it is
2015 		 a user variable (if we have few registers) or it can't
2016 		 fit into the desired register (meaning something special
2017 		 is going on).
2018 		 Also avoid substituting a return register into I3, because
2019 		 reload can't handle a conflict with constraints of other
2020 		 inputs.  */
2021 	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
2022 		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
2023 	return 0;
2024     }
2025   else if (GET_CODE (dest) != CC0)
2026     return 0;
2027 
2028 
2029   if (GET_CODE (PATTERN (i3)) == PARALLEL)
2030     for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2031       if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2032 	{
2033 	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2034 
2035 	  /* If the clobber represents an earlyclobber operand, we must not
2036 	     substitute an expression containing the clobbered register.
2037 	     As we do not analyze the constraint strings here, we have to
2038 	     make the conservative assumption.  However, if the register is
2039 	     a fixed hard reg, the clobber cannot represent any operand;
2040 	     we leave it up to the machine description to either accept or
2041 	     reject use-and-clobber patterns.  */
2042 	  if (!REG_P (reg)
2043 	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2044 	      || !fixed_regs[REGNO (reg)])
2045 	    if (reg_overlap_mentioned_p (reg, src))
2046 	      return 0;
2047 	}
2048 
2049   /* If INSN contains anything volatile, or is an `asm' (whether volatile
2050      or not), reject, unless nothing volatile comes between it and I3.  */
2051 
2052   if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2053     {
2054       /* Make sure neither succ nor succ2 contains a volatile reference.  */
2055       if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2056 	return 0;
2057       if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2058 	return 0;
2059       /* We'll check insns between INSN and I3 below.  */
2060     }
2061 
2062   /* If INSN is an asm, and DEST is a hard register, reject, since it has
2063      to be an explicit register variable, and was chosen for a reason.  */
2064 
2065   if (GET_CODE (src) == ASM_OPERANDS
2066       && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2067     return 0;
2068 
2069   /* If INSN contains volatile references (specifically volatile MEMs),
2070      we cannot combine across any other volatile references.
2071      Even if INSN doesn't contain volatile references, any intervening
2072      volatile insn might affect machine state.  */
2073 
2074   is_volatile_p = volatile_refs_p (PATTERN (insn))
2075     ? volatile_refs_p
2076     : volatile_insn_p;
2077 
2078   for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2079     if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2080       return 0;
2081 
2082   /* If INSN contains an autoincrement or autodecrement, make sure that
2083      register is not used between there and I3, and not already used in
2084      I3 either.  Neither must it be used in PRED or SUCC, if they exist.
2085      Also insist that I3 not be a jump; if it were one
2086      and the incremented register were spilled, we would lose.  */
2087 
2088   if (AUTO_INC_DEC)
2089     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2090       if (REG_NOTE_KIND (link) == REG_INC
2091 	  && (JUMP_P (i3)
2092 	      || reg_used_between_p (XEXP (link, 0), insn, i3)
2093 	      || (pred != NULL_RTX
2094 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2095 	      || (pred2 != NULL_RTX
2096 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2097 	      || (succ != NULL_RTX
2098 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2099 	      || (succ2 != NULL_RTX
2100 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2101 	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2102 	return 0;
2103 
2104   /* Don't combine an insn that follows a CC0-setting insn.
2105      An insn that uses CC0 must not be separated from the one that sets it.
2106      We do, however, allow I2 to follow a CC0-setting insn if that insn
2107      is passed as I1; in that case it will be deleted also.
2108      We also allow combining in this case if all the insns are adjacent
2109      because that would leave the two CC0 insns adjacent as well.
2110      It would be more logical to test whether CC0 occurs inside I1 or I2,
2111      but that would be much slower, and this ought to be equivalent.  */
2112 
2113   if (HAVE_cc0)
2114     {
2115       p = prev_nonnote_insn (insn);
2116       if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2117 	  && ! all_adjacent)
2118 	return 0;
2119     }
2120 
2121   /* If we get here, we have passed all the tests and the combination is
2122      to be allowed.  */
2123 
2124   *pdest = dest;
2125   *psrc = src;
2126 
2127   return 1;
2128 }
2129 
2130 /* LOC is the location within I3 that contains its pattern or the component
2131    of a PARALLEL of the pattern.  We validate that it is valid for combining.
2132 
2133    One problem is if I3 modifies its output, as opposed to replacing it
2134    entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2135    doing so would produce an insn that is not equivalent to the original insns.
2136 
2137    Consider:
2138 
2139 	 (set (reg:DI 101) (reg:DI 100))
2140 	 (set (subreg:SI (reg:DI 101) 0) <foo>)
2141 
2142    This is NOT equivalent to:
2143 
2144 	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2145 		    (set (reg:DI 101) (reg:DI 100))])
2146 
2147    Not only does this modify 100 (in which case it might still be valid
2148    if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2149 
2150    We can also run into a problem if I2 sets a register that I1
2151    uses and I1 gets directly substituted into I3 (not via I2).  In that
2152    case, we would be getting the wrong value of I2DEST into I3, so we
2153    must reject the combination.  This case occurs when I2 and I1 both
2154    feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2155    If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2156    of a SET must prevent combination from occurring.  The same situation
2157    can occur for I0, in which case I0_NOT_IN_SRC is set.
2158 
2159    Before doing the above check, we first try to expand a field assignment
2160    into a set of logical operations.
2161 
2162    If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2163    we place a register that is both set and used within I3.  If more than one
2164    such register is detected, we fail.
2165 
2166    Return 1 if the combination is valid, zero otherwise.  */
2167 
2168 static int
2169 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2170 		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2171 {
2172   rtx x = *loc;
2173 
2174   if (GET_CODE (x) == SET)
2175     {
2176       rtx set = x;
2177       rtx dest = SET_DEST (set);
2178       rtx src = SET_SRC (set);
2179       rtx inner_dest = dest;
2180       rtx subdest;
2181 
2182       while (GET_CODE (inner_dest) == STRICT_LOW_PART
2183 	     || GET_CODE (inner_dest) == SUBREG
2184 	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
2185 	inner_dest = XEXP (inner_dest, 0);
2186 
2187       /* Check for the case where I3 modifies its output, as discussed
2188 	 above.  We don't want to prevent pseudos from being combined
2189 	 into the address of a MEM, so only prevent the combination if
2190 	 i1 or i2 set the same MEM.  */
2191       if ((inner_dest != dest &&
2192 	   (!MEM_P (inner_dest)
2193 	    || rtx_equal_p (i2dest, inner_dest)
2194 	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
2195 	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2196 	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
2197 	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2198 	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2199 
2200 	  /* This is the same test done in can_combine_p except we can't test
2201 	     all_adjacent; we don't have to, since this instruction will stay
2202 	     in place, thus we are not considering increasing the lifetime of
2203 	     INNER_DEST.
2204 
2205 	     Also, if this insn sets a function argument, combining it with
2206 	     something that might need a spill could clobber a previous
2207 	     function argument; the all_adjacent test in can_combine_p also
2208 	     checks this; here, we do a more specific test for this case.  */
2209 
2210 	  || (REG_P (inner_dest)
2211 	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2212 	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2213 					GET_MODE (inner_dest))))
2214 	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2215 	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2216 	return 0;
2217 
2218       /* If DEST is used in I3, it is being killed in this insn, so
2219 	 record that for later.  We have to consider paradoxical
2220 	 subregs here, since they kill the whole register, but we
2221 	 ignore partial subregs, STRICT_LOW_PART, etc.
2222 	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2223 	 STACK_POINTER_REGNUM, since these are always considered to be
2224 	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
2225       subdest = dest;
2226       if (GET_CODE (subdest) == SUBREG
2227 	  && (GET_MODE_SIZE (GET_MODE (subdest))
2228 	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2229 	subdest = SUBREG_REG (subdest);
2230       if (pi3dest_killed
2231 	  && REG_P (subdest)
2232 	  && reg_referenced_p (subdest, PATTERN (i3))
2233 	  && REGNO (subdest) != FRAME_POINTER_REGNUM
2234 	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2235 	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2236 	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2237 	      || (REGNO (subdest) != ARG_POINTER_REGNUM
2238 		  || ! fixed_regs [REGNO (subdest)]))
2239 	  && REGNO (subdest) != STACK_POINTER_REGNUM)
2240 	{
2241 	  if (*pi3dest_killed)
2242 	    return 0;
2243 
2244 	  *pi3dest_killed = subdest;
2245 	}
2246     }
2247 
2248   else if (GET_CODE (x) == PARALLEL)
2249     {
2250       int i;
2251 
2252       for (i = 0; i < XVECLEN (x, 0); i++)
2253 	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2254 				i1_not_in_src, i0_not_in_src, pi3dest_killed))
2255 	  return 0;
2256     }
2257 
2258   return 1;
2259 }
2260 
2261 /* Return 1 if X is an arithmetic expression that contains a multiplication
2262    or division.  We don't count multiplications by powers of two here.  */
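/* For instance, (plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102))
   contains such a multiplication, whereas (mult:SI (reg:SI 100) (const_int 4))
   does not count because 4 is a power of two.  */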
2263 
2264 static int
2265 contains_muldiv (rtx x)
2266 {
2267   switch (GET_CODE (x))
2268     {
2269     case MOD:  case DIV:  case UMOD:  case UDIV:
2270       return 1;
2271 
2272     case MULT:
2273       return ! (CONST_INT_P (XEXP (x, 1))
2274 		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
2275     default:
2276       if (BINARY_P (x))
2277 	return contains_muldiv (XEXP (x, 0))
2278 	    || contains_muldiv (XEXP (x, 1));
2279 
2280       if (UNARY_P (x))
2281 	return contains_muldiv (XEXP (x, 0));
2282 
2283       return 0;
2284     }
2285 }
2286 
2287 /* Determine whether INSN can be used in a combination.  Return nonzero if
2288    not.  This is used in try_combine to detect early some cases where we
2289    can't perform combinations.  */
2290 
2291 static int
2292 cant_combine_insn_p (rtx_insn *insn)
2293 {
2294   rtx set;
2295   rtx src, dest;
2296 
2297   /* If this isn't really an insn, we can't do anything.
2298      This can occur when flow deletes an insn that it has merged into an
2299      auto-increment address.  */
2300   if (!NONDEBUG_INSN_P (insn))
2301     return 1;
2302 
2303   /* Never combine loads and stores involving hard regs that are likely
2304      to be spilled.  The register allocator can usually handle such
2305      reg-reg moves by tying.  If we allow the combiner to make
2306      substitutions of likely-spilled regs, reload might die.
2307      As an exception, we allow combinations involving fixed regs; these are
2308      not available to the register allocator so there's no risk involved.  */
2309 
2310   set = single_set (insn);
2311   if (! set)
2312     return 0;
2313   src = SET_SRC (set);
2314   dest = SET_DEST (set);
2315   if (GET_CODE (src) == SUBREG)
2316     src = SUBREG_REG (src);
2317   if (GET_CODE (dest) == SUBREG)
2318     dest = SUBREG_REG (dest);
2319   if (REG_P (src) && REG_P (dest)
2320       && ((HARD_REGISTER_P (src)
2321 	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2322 	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2323 	  || (HARD_REGISTER_P (dest)
2324 	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2325 	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2326     return 1;
2327 
2328   return 0;
2329 }
2330 
2331 struct likely_spilled_retval_info
2332 {
2333   unsigned regno, nregs;
2334   unsigned mask;
2335 };
2336 
2337 /* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
2338    hard registers that are known to be written to / clobbered in full.  */
2339 static void
2340 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2341 {
2342   struct likely_spilled_retval_info *const info =
2343     (struct likely_spilled_retval_info *) data;
2344   unsigned regno, nregs;
2345   unsigned new_mask;
2346 
2347   if (!REG_P (XEXP (set, 0)))
2348     return;
2349   regno = REGNO (x);
2350   if (regno >= info->regno + info->nregs)
2351     return;
2352   nregs = REG_NREGS (x);
2353   if (regno + nregs <= info->regno)
2354     return;
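  /* Build a mask with one bit for each hard register written by this store,
     shift it so that bit 0 corresponds to INFO->regno, and clear those bits
     from INFO->mask.  */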
2355   new_mask = (2U << (nregs - 1)) - 1;
2356   if (regno < info->regno)
2357     new_mask >>= info->regno - regno;
2358   else
2359     new_mask <<= regno - info->regno;
2360   info->mask &= ~new_mask;
2361 }
2362 
2363 /* Return nonzero iff part of the return value is live during INSN, and
2364    it is likely spilled.  This can happen when more than one insn is needed
2365    to copy the return value, e.g. when we consider combining into the
2366    second copy insn for a complex value.  */
2367 
2368 static int
2369 likely_spilled_retval_p (rtx_insn *insn)
2370 {
2371   rtx_insn *use = BB_END (this_basic_block);
2372   rtx reg;
2373   rtx_insn *p;
2374   unsigned regno, nregs;
2375   /* We assume here that no machine mode needs more than
2376      32 hard registers when the value overlaps with a register
2377      for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
2378   unsigned mask;
2379   struct likely_spilled_retval_info info;
2380 
2381   if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2382     return 0;
2383   reg = XEXP (PATTERN (use), 0);
2384   if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2385     return 0;
2386   regno = REGNO (reg);
2387   nregs = REG_NREGS (reg);
2388   if (nregs == 1)
2389     return 0;
2390   mask = (2U << (nregs - 1)) - 1;
2391 
2392   /* Disregard parts of the return value that are set later.  */
2393   info.regno = regno;
2394   info.nregs = nregs;
2395   info.mask = mask;
2396   for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2397     if (INSN_P (p))
2398       note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2399   mask = info.mask;
2400 
2401   /* Check if any of the (probably) live return value registers is
2402      likely spilled.  */
2403   nregs --;
2404   do
2405     {
2406       if ((mask & 1 << nregs)
2407 	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2408 	return 1;
2409     } while (nregs--);
2410   return 0;
2411 }
2412 
2413 /* Adjust INSN after we made a change to its destination.
2414 
2415    Changing the destination can invalidate notes that say something about
2416    the results of the insn and a LOG_LINK pointing to the insn.  */
2417 
2418 static void
2419 adjust_for_new_dest (rtx_insn *insn)
2420 {
2421   /* For notes, be conservative and simply remove them.  */
2422   remove_reg_equal_equiv_notes (insn);
2423 
2424   /* The new insn will have a destination that was previously the destination
2425      of an insn just above it.  Call distribute_links to make a LOG_LINK from
2426      the next use of that destination.  */
2427 
2428   rtx set = single_set (insn);
2429   gcc_assert (set);
2430 
2431   rtx reg = SET_DEST (set);
2432 
2433   while (GET_CODE (reg) == ZERO_EXTRACT
2434 	 || GET_CODE (reg) == STRICT_LOW_PART
2435 	 || GET_CODE (reg) == SUBREG)
2436     reg = XEXP (reg, 0);
2437   gcc_assert (REG_P (reg));
2438 
2439   distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2440 
2441   df_insn_rescan (insn);
2442 }
2443 
2444 /* Return TRUE if combine can reuse reg X in mode MODE.
2445    ADDED_SETS is nonzero if the original set is still required.  */
2446 static bool
2447 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2448 {
2449   unsigned int regno;
2450 
2451   if (!REG_P (x))
2452     return false;
2453 
2454   regno = REGNO (x);
2455   /* Allow hard registers if the new mode is legal, and occupies no more
2456      registers than the old mode.  */
2457   if (regno < FIRST_PSEUDO_REGISTER)
2458     return (HARD_REGNO_MODE_OK (regno, mode)
2459 	    && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);
2460 
2461   /* Or a pseudo that is only used once.  */
2462   return (regno < reg_n_sets_max
2463 	  && REG_N_SETS (regno) == 1
2464 	  && !added_sets
2465 	  && !REG_USERVAR_P (x));
2466 }
2467 
2468 
2469 /* Check whether X, the destination of a set, refers to part of
2470    the register specified by REG.  */
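/* For example, (strict_low_part (subreg:HI (reg:SI 100) 0)) refers to a
   subword of (reg:SI 100), while a destination that is (reg:SI 100) itself
   does not qualify here.  */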
2471 
2472 static bool
2473 reg_subword_p (rtx x, rtx reg)
2474 {
2475   /* Check that reg is an integer mode register.  */
2476   if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2477     return false;
2478 
2479   if (GET_CODE (x) == STRICT_LOW_PART
2480       || GET_CODE (x) == ZERO_EXTRACT)
2481     x = XEXP (x, 0);
2482 
2483   return GET_CODE (x) == SUBREG
2484 	 && SUBREG_REG (x) == reg
2485 	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2486 }
2487 
2488 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2489    Note that the INSN should be deleted *after* removing dead edges, so
2490    that the kept edge is the fallthrough edge for a (set (pc) (pc))
2491    but not for a (set (pc) (label_ref FOO)).  */
2492 
2493 static void
2494 update_cfg_for_uncondjump (rtx_insn *insn)
2495 {
2496   basic_block bb = BLOCK_FOR_INSN (insn);
2497   gcc_assert (BB_END (bb) == insn);
2498 
2499   purge_dead_edges (bb);
2500 
2501   delete_insn (insn);
2502   if (EDGE_COUNT (bb->succs) == 1)
2503     {
2504       rtx_insn *insn;
2505 
2506       single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2507 
2508       /* Remove barriers from the footer if there are any.  */
2509       for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2510 	if (BARRIER_P (insn))
2511 	  {
2512 	    if (PREV_INSN (insn))
2513 	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2514 	    else
2515 	      BB_FOOTER (bb) = NEXT_INSN (insn);
2516 	    if (NEXT_INSN (insn))
2517 	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2518 	  }
2519 	else if (LABEL_P (insn))
2520 	  break;
2521     }
2522 }
2523 
2524 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2525    by an arbitrary number of CLOBBERs.  */
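/* For instance, (parallel [(set (reg:SI 100) ...)
			    (set (reg:SI 101) ...)
			    (clobber (scratch:SI))]) qualifies for N = 2.  */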
2526 static bool
2527 is_parallel_of_n_reg_sets (rtx pat, int n)
2528 {
2529   if (GET_CODE (pat) != PARALLEL)
2530     return false;
2531 
2532   int len = XVECLEN (pat, 0);
2533   if (len < n)
2534     return false;
2535 
2536   int i;
2537   for (i = 0; i < n; i++)
2538     if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2539 	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2540       return false;
2541   for ( ; i < len; i++)
2542     if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2543 	|| XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2544       return false;
2545 
2546   return true;
2547 }
2548 
2549 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2550    CLOBBERs), can be split into individual SETs in that order, without
2551    changing semantics.  */
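/* In particular, the split is unsafe if a later SET reads a register written
   by an earlier SET, since within a PARALLEL all sources are read before any
   destination is written.  */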
2552 static bool
2553 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2554 {
2555   if (!insn_nothrow_p (insn))
2556     return false;
2557 
2558   rtx pat = PATTERN (insn);
2559 
2560   int i, j;
2561   for (i = 0; i < n; i++)
2562     {
2563       if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2564 	return false;
2565 
2566       rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2567 
2568       for (j = i + 1; j < n; j++)
2569 	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2570 	  return false;
2571     }
2572 
2573   return true;
2574 }
2575 
2576 /* Try to combine the insns I0, I1 and I2 into I3.
2577    Here I0, I1 and I2 appear earlier than I3.
2578    I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2579    I3.
2580 
2581    If we are combining more than two insns and the resulting insn is not
2582    recognized, try splitting it into two insns.  If that happens, I2 and I3
2583    are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2584    Otherwise, I0, I1 and I2 are pseudo-deleted.
2585 
2586    Return 0 if the combination does not work.  Then nothing is changed.
2587    If we did the combination, return the insn at which combine should
2588    resume scanning.
2589 
2590    Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2591    new direct jump instruction.
2592 
2593    LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2594    been the I3 passed to an earlier try_combine within the same basic
2595    block.  */
2596 
2597 static rtx_insn *
2598 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2599 	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
2600 {
2601   /* New patterns for I3 and I2, respectively.  */
2602   rtx newpat, newi2pat = 0;
2603   rtvec newpat_vec_with_clobbers = 0;
2604   int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2605   /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2606      dead.  */
2607   int added_sets_0, added_sets_1, added_sets_2;
2608   /* Total number of SETs to put into I3.  */
2609   int total_sets;
2610   /* Nonzero if I2's or I1's body now appears in I3.  */
2611   int i2_is_used = 0, i1_is_used = 0;
2612   /* INSN_CODEs for new I3, new I2, and user of condition code.  */
2613   int insn_code_number, i2_code_number = 0, other_code_number = 0;
2614   /* Contains I3 if the destination of I3 is used in its source, which means
2615      that the old life of I3 is being killed.  If that usage is placed into
2616      I2 and not in I3, a REG_DEAD note must be made.  */
2617   rtx i3dest_killed = 0;
2618   /* SET_DEST and SET_SRC of I2, I1 and I0.  */
2619   rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2620   /* Copy of SET_SRC of I1 and I0, if needed.  */
2621   rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2622   /* Set if I2DEST was reused as a scratch register.  */
2623   bool i2scratch = false;
2624   /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
2625   rtx i0pat = 0, i1pat = 0, i2pat = 0;
2626   /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
2627   int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2628   int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2629   int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2630   int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2631   /* Notes that must be added to REG_NOTES in I3 and I2.  */
2632   rtx new_i3_notes, new_i2_notes;
2633   /* Notes that we substituted I3 into I2 instead of the normal case.  */
2634   int i3_subst_into_i2 = 0;
2635   /* Notes that I1, I2 or I3 is a MULT operation.  */
2636   int have_mult = 0;
2637   int swap_i2i3 = 0;
2638   int changed_i3_dest = 0;
2639 
2640   int maxreg;
2641   rtx_insn *temp_insn;
2642   rtx temp_expr;
2643   struct insn_link *link;
2644   rtx other_pat = 0;
2645   rtx new_other_notes;
2646   int i;
2647 
2648   /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2649      never be).  */
2650   if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2651     return 0;
2652 
2653   /* Only try four-insn combinations when there's high likelihood of
2654      success.  Look for simple insns, such as loads of constants or
2655      binary operations involving a constant.  */
2656   if (i0)
2657     {
2658       int i;
2659       int ngood = 0;
2660       int nshift = 0;
2661       rtx set0, set3;
2662 
2663       if (!flag_expensive_optimizations)
2664 	return 0;
2665 
2666       for (i = 0; i < 4; i++)
2667 	{
2668 	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2669 	  rtx set = single_set (insn);
2670 	  rtx src;
2671 	  if (!set)
2672 	    continue;
2673 	  src = SET_SRC (set);
2674 	  if (CONSTANT_P (src))
2675 	    {
2676 	      ngood += 2;
2677 	      break;
2678 	    }
2679 	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2680 	    ngood++;
2681 	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2682 		   || GET_CODE (src) == LSHIFTRT)
2683 	    nshift++;
2684 	}
2685 
2686       /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2687 	 are likely manipulating its value.  Ideally we'll be able to combine
2688 	 all four insns into a bitfield insertion of some kind.
2689 
2690 	 Note the source in I0 might be inside a sign/zero extension and the
2691 	 memory modes in I0 and I3 might be different.  So extract the address
2692 	 from the destination of I3 and search for it in the source of I0.
2693 
2694 	 In the event that there's a match but the source/dest do not actually
2695 	 refer to the same memory, the worst that happens is we try some
2696 	 combinations that we wouldn't have otherwise.  */
2697       if ((set0 = single_set (i0))
2698 	  /* Ensure the source of SET0 is a MEM, possibly buried inside
2699 	     an extension.  */
2700 	  && (GET_CODE (SET_SRC (set0)) == MEM
2701 	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2702 		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2703 		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2704 	  && (set3 = single_set (i3))
2705 	  /* Ensure the destination of SET3 is a MEM.  */
2706 	  && GET_CODE (SET_DEST (set3)) == MEM
2707 	  /* Would it be better to extract the base address for the MEM
2708 	     in SET3 and look for that?  I don't have cases where it matters
2709 	     but I could envision such cases.  */
2710 	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2711 	ngood += 2;
2712 
2713       if (ngood < 2 && nshift < 2)
2714 	return 0;
2715     }
2716 
2717   /* Exit early if one of the insns involved can't be used for
2718      combinations.  */
2719   if (CALL_P (i2)
2720       || (i1 && CALL_P (i1))
2721       || (i0 && CALL_P (i0))
2722       || cant_combine_insn_p (i3)
2723       || cant_combine_insn_p (i2)
2724       || (i1 && cant_combine_insn_p (i1))
2725       || (i0 && cant_combine_insn_p (i0))
2726       || likely_spilled_retval_p (i3))
2727     return 0;
2728 
2729   combine_attempts++;
2730   undobuf.other_insn = 0;
2731 
2732   /* Reset the hard register usage information.  */
2733   CLEAR_HARD_REG_SET (newpat_used_regs);
2734 
2735   if (dump_file && (dump_flags & TDF_DETAILS))
2736     {
2737       if (i0)
2738 	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2739 		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2740       else if (i1)
2741 	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2742 		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2743       else
2744 	fprintf (dump_file, "\nTrying %d -> %d:\n",
2745 		 INSN_UID (i2), INSN_UID (i3));
2746     }
2747 
2748   /* If multiple insns feed into one of I2 or I3, they can be in any
2749      order.  To simplify the code below, reorder them in sequence.  */
2750   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2751     std::swap (i0, i2);
2752   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2753     std::swap (i0, i1);
2754   if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2755     std::swap (i1, i2);
2756 
2757   added_links_insn = 0;
2758 
2759   /* First check for one important special case that the code below will
2760      not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
2761      and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
2762      we may be able to replace that destination with the destination of I3.
2763      This occurs in the common code where we compute both a quotient and
2764      remainder into a structure, in which case we want to do the computation
2765      directly into the structure to avoid register-register copies.
2766 
2767      Note that this case handles both multiple sets in I2 and also cases
2768      where I2 has a number of CLOBBERs inside the PARALLEL.
2769 
2770      We make very conservative checks below and only try to handle the
2771      most common cases of this.  For example, we only handle the case
2772      where I2 and I3 are adjacent to avoid making difficult register
2773      usage tests.  */
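  /* A typical shape (register numbers purely illustrative) is an I2 of
	 (parallel [(set (reg:SI 70) (div:SI (reg:SI 68) (reg:SI 69)))
		    (set (reg:SI 71) (mod:SI (reg:SI 68) (reg:SI 69)))])
     with an I3 of
	 (set (mem:SI (reg:SI 72)) (reg:SI 70))
     where the destination of I3 can simply replace (reg:SI 70) in I2.  */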
2774 
2775   if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2776       && REG_P (SET_SRC (PATTERN (i3)))
2777       && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2778       && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2779       && GET_CODE (PATTERN (i2)) == PARALLEL
2780       && ! side_effects_p (SET_DEST (PATTERN (i3)))
2781       /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2782 	 below would need to check what is inside (and reg_overlap_mentioned_p
2783 	 doesn't support those codes anyway).  Don't allow those destinations;
2784 	 the resulting insn isn't likely to be recognized anyway.  */
2785       && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2786       && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2787       && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2788 				    SET_DEST (PATTERN (i3)))
2789       && next_active_insn (i2) == i3)
2790     {
2791       rtx p2 = PATTERN (i2);
2792 
2793       /* Make sure that the destination of I3,
2794 	 which we are going to substitute into one output of I2,
2795 	 is not used within another output of I2.  We must avoid making this:
2796 	 (parallel [(set (mem (reg 69)) ...)
2797 		    (set (reg 69) ...)])
2798 	 which is not well-defined as to order of actions.
2799 	 (Besides, reload can't handle output reloads for this.)
2800 
2801 	 The problem can also happen if the dest of I3 is a memory ref,
2802 	 if another dest in I2 is an indirect memory ref.
2803 
2804 	 Neither can this PARALLEL be an asm.  We do not allow combining
2805 	 that usually (see can_combine_p), so do not here either.  */
2806       bool ok = true;
2807       for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2808 	{
2809 	  if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2810 	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2811 	      && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2812 					  SET_DEST (XVECEXP (p2, 0, i))))
2813 	    ok = false;
2814 	  else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2815 		   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2816 	    ok = false;
2817 	}
2818 
2819       if (ok)
2820 	for (i = 0; i < XVECLEN (p2, 0); i++)
2821 	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2822 	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2823 	    {
2824 	      combine_merges++;
2825 
2826 	      subst_insn = i3;
2827 	      subst_low_luid = DF_INSN_LUID (i2);
2828 
2829 	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
2830 	      i2src = SET_SRC (XVECEXP (p2, 0, i));
2831 	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
2832 	      i2dest_killed = dead_or_set_p (i2, i2dest);
2833 
2834 	      /* Replace the dest in I2 with our dest and make the resulting
2835 		 insn the new pattern for I3.  Then skip to where we validate
2836 		 the pattern.  Everything was set up above.  */
2837 	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2838 	      newpat = p2;
2839 	      i3_subst_into_i2 = 1;
2840 	      goto validate_replacement;
2841 	    }
2842     }
2843 
2844   /* If I2 is setting a pseudo to a constant and I3 is setting some
2845      sub-part of it to another constant, merge them by making a new
2846      constant.  */
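  /* For instance (constants purely illustrative), an I2 of
	 (set (reg:SI 100) (const_int 0x12345678))
     and an I3 of
	 (set (subreg:HI (reg:SI 100) 0) (const_int 0x2bcd))
     merge into a single SET of (reg:SI 100) to 0x12342bcd.  */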
2847   if (i1 == 0
2848       && (temp_expr = single_set (i2)) != 0
2849       && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2850       && GET_CODE (PATTERN (i3)) == SET
2851       && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2852       && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2853     {
2854       rtx dest = SET_DEST (PATTERN (i3));
2855       int offset = -1;
2856       int width = 0;
2857 
2858       if (GET_CODE (dest) == ZERO_EXTRACT)
2859 	{
2860 	  if (CONST_INT_P (XEXP (dest, 1))
2861 	      && CONST_INT_P (XEXP (dest, 2)))
2862 	    {
2863 	      width = INTVAL (XEXP (dest, 1));
2864 	      offset = INTVAL (XEXP (dest, 2));
2865 	      dest = XEXP (dest, 0);
2866 	      if (BITS_BIG_ENDIAN)
2867 		offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
2868 	    }
2869 	}
2870       else
2871 	{
2872 	  if (GET_CODE (dest) == STRICT_LOW_PART)
2873 	    dest = XEXP (dest, 0);
2874 	  width = GET_MODE_PRECISION (GET_MODE (dest));
2875 	  offset = 0;
2876 	}
2877 
2878       if (offset >= 0)
2879 	{
2880 	  /* If this is the low part, we're done.  */
2881 	  if (subreg_lowpart_p (dest))
2882 	    ;
2883 	  /* Handle the case where inner is twice the size of outer.  */
2884 	  else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
2885 		   == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2886 	    offset += GET_MODE_PRECISION (GET_MODE (dest));
2887 	  /* Otherwise give up for now.  */
2888 	  else
2889 	    offset = -1;
2890 	}
2891 
2892       if (offset >= 0)
2893 	{
2894 	  rtx inner = SET_SRC (PATTERN (i3));
2895 	  rtx outer = SET_SRC (temp_expr);
2896 
2897 	  wide_int o
2898 	    = wi::insert (rtx_mode_t (outer, GET_MODE (SET_DEST (temp_expr))),
2899 			  rtx_mode_t (inner, GET_MODE (dest)),
2900 			  offset, width);
2901 
2902 	  combine_merges++;
2903 	  subst_insn = i3;
2904 	  subst_low_luid = DF_INSN_LUID (i2);
2905 	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
2906 	  i2dest = SET_DEST (temp_expr);
2907 	  i2dest_killed = dead_or_set_p (i2, i2dest);
2908 
2909 	  /* Replace the source in I2 with the new constant and make the
2910 	     resulting insn the new pattern for I3.  Then skip to where we
2911 	     validate the pattern.  Everything was set up above.  */
2912 	  SUBST (SET_SRC (temp_expr),
2913 		 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));
2914 
2915 	  newpat = PATTERN (i2);
2916 
2917           /* The dest of I3 has been replaced with the dest of I2.  */
2918           changed_i3_dest = 1;
2919 	  goto validate_replacement;
2920 	}
2921     }
2922 
2923   /* If we have no I1 and I2 looks like:
2924 	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2925 		   (set Y OP)])
2926      make up a dummy I1 that is
2927 	(set Y OP)
2928      and change I2 to be
2929 	(set (reg:CC X) (compare:CC Y (const_int 0)))
2930 
2931      (We can ignore any trailing CLOBBERs.)
2932 
2933      This undoes a previous combination and allows us to match a branch-and-
2934      decrement insn.  */
2935 
2936   if (!HAVE_cc0 && i1 == 0
2937       && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2938       && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2939 	  == MODE_CC)
2940       && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2941       && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2942       && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2943 		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2944       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2945       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2946     {
2947       /* We make I1 with the same INSN_UID as I2.  This gives it
2948 	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
2949 	 never appear in the insn stream so giving it the same INSN_UID
2950 	 as I2 will not cause a problem.  */
2951 
2952       i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2953 			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2954 			 -1, NULL_RTX);
2955       INSN_UID (i1) = INSN_UID (i2);
2956 
2957       SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2958       SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2959 	     SET_DEST (PATTERN (i1)));
2960       unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2961       SUBST_LINK (LOG_LINKS (i2),
2962 		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2963     }
2964 
2965   /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2966      make those two SETs separate I1 and I2 insns, and make an I0 that is
2967      the original I1.  */
2968   if (!HAVE_cc0 && i0 == 0
2969       && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2970       && can_split_parallel_of_n_reg_sets (i2, 2)
2971       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2972       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
2973       && !find_reg_note (i2, REG_UNUSED, 0))
2974     {
2975       /* If there is no I1, there is no I0 either.  */
2976       i0 = i1;
2977 
2978       /* We make I1 with the same INSN_UID as I2.  This gives it
2979 	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
2980 	 never appear in the insn stream so giving it the same INSN_UID
2981 	 as I2 will not cause a problem.  */
2982 
2983       i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2984 			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2985 			 -1, NULL_RTX);
2986       INSN_UID (i1) = INSN_UID (i2);
2987 
2988       SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2989     }
2990 
2991   /* Verify that I2 and I1 are valid for combining.  */
2992   if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2993       || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
2994 				 &i1dest, &i1src))
2995       || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
2996 				 &i0dest, &i0src)))
2997     {
2998       undo_all ();
2999       return 0;
3000     }
3001 
3002   /* Record whether I2DEST is used in I2SRC and similarly for the other
3003      cases.  Knowing this will help in register status updating below.  */
3004   i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3005   i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3006   i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3007   i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3008   i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3009   i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3010   i2dest_killed = dead_or_set_p (i2, i2dest);
3011   i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3012   i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3013 
3014   /* For the earlier insns, determine which of the subsequent ones they
3015      feed.  */
3016   i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3017   i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3018   i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3019 			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
3020 			     && reg_overlap_mentioned_p (i0dest, i2src))));
3021 
3022   /* Ensure that I3's pattern can be the destination of combines.  */
3023   if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3024 			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3025 			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3026 				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3027 			  &i3dest_killed))
3028     {
3029       undo_all ();
3030       return 0;
3031     }
3032 
3033   /* See if any of the insns is a MULT operation.  Unless one is, we will
3034      reject a combination that is, since it must be slower.  Be conservative
3035      here.  */
3036   if (GET_CODE (i2src) == MULT
3037       || (i1 != 0 && GET_CODE (i1src) == MULT)
3038       || (i0 != 0 && GET_CODE (i0src) == MULT)
3039       || (GET_CODE (PATTERN (i3)) == SET
3040 	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3041     have_mult = 1;
3042 
3043   /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3044      We used to do this EXCEPT in one case: I3 has a post-inc in an
3045      output operand.  However, that exception can give rise to insns like
3046 	mov r3,(r3)+
3047      which is a famous insn on the PDP-11 where the value of r3 used as the
3048      source was model-dependent.  Avoid this sort of thing.  */
3049 
3050 #if 0
3051   if (!(GET_CODE (PATTERN (i3)) == SET
3052 	&& REG_P (SET_SRC (PATTERN (i3)))
3053 	&& MEM_P (SET_DEST (PATTERN (i3)))
3054 	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3055 	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3056     /* It's not the exception.  */
3057 #endif
3058     if (AUTO_INC_DEC)
3059       {
3060 	rtx link;
3061 	for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3062 	  if (REG_NOTE_KIND (link) == REG_INC
3063 	      && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3064 		  || (i1 != 0
3065 		      && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3066 	    {
3067 	      undo_all ();
3068 	      return 0;
3069 	    }
3070       }
3071 
3072   /* See if the SETs in I1 or I2 need to be kept around in the merged
3073      instruction: whenever the value set there is still needed past I3.
3074      For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3075 
3076      For the SET in I1, we have two cases: if I1 and I2 independently feed
3077      into I3, the set in I1 needs to be kept around unless I1DEST dies
3078      or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
3079      in I1 needs to be kept around unless I1DEST dies or is set in either
3080      I2 or I3.  The same considerations apply to I0.  */
3081 
3082   added_sets_2 = !dead_or_set_p (i3, i2dest);
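  /* For instance (a hypothetical three-insn case, not from the original
     sources): if I2 is "r100 = r101 + r102" and I3 is "r103 = r100 * 2",
     but r100 neither dies nor is set in I3 because it is still used later,
     then the merged pattern must also keep a SET of r100; that is what
     ADDED_SETS_2 records below.  */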
3083 
3084   if (i1)
3085     added_sets_1 = !(dead_or_set_p (i3, i1dest)
3086 		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3087   else
3088     added_sets_1 = 0;
3089 
3090   if (i0)
3091     added_sets_0 =  !(dead_or_set_p (i3, i0dest)
3092 		      || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3093 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3094 			  && dead_or_set_p (i2, i0dest)));
3095   else
3096     added_sets_0 = 0;
3097 
3098   /* We are about to copy insns for the case where they need to be kept
3099      around.  Check that they can be copied in the merged instruction.  */
3100 
3101   if (targetm.cannot_copy_insn_p
3102       && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3103 	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3104 	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3105     {
3106       undo_all ();
3107       return 0;
3108     }
3109 
3110   /* If the set in I2 needs to be kept around, we must make a copy of
3111      PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3112      PATTERN (I2), we are only substituting for the original I1DEST, not into
3113      an already-substituted copy.  This also prevents making self-referential
3114      rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3115      I2DEST.  */
3116 
3117   if (added_sets_2)
3118     {
3119       if (GET_CODE (PATTERN (i2)) == PARALLEL)
3120 	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3121       else
3122 	i2pat = copy_rtx (PATTERN (i2));
3123     }
3124 
3125   if (added_sets_1)
3126     {
3127       if (GET_CODE (PATTERN (i1)) == PARALLEL)
3128 	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3129       else
3130 	i1pat = copy_rtx (PATTERN (i1));
3131     }
3132 
3133   if (added_sets_0)
3134     {
3135       if (GET_CODE (PATTERN (i0)) == PARALLEL)
3136 	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3137       else
3138 	i0pat = copy_rtx (PATTERN (i0));
3139     }
3140 
3141   combine_merges++;
3142 
3143   /* Substitute in the latest insn for the regs set by the earlier ones.  */
3144 
3145   maxreg = max_reg_num ();
3146 
3147   subst_insn = i3;
3148 
3149   /* Many machines that don't use CC0 have insns that can both perform an
3150      arithmetic operation and set the condition code.  These operations will
3151      be represented as a PARALLEL with the first element of the vector
3152      being a COMPARE of an arithmetic operation with the constant zero.
3153      The second element of the vector will set some pseudo to the result
3154      of the same arithmetic operation.  If we simplify the COMPARE, we won't
3155      match such a pattern and so will generate an extra insn.   Here we test
3156      for this case, where both the comparison and the operation result are
3157      needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3158      I2SRC.  Later we will make the PARALLEL that contains I2.  */
3159 
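  /* Sketch of the situation handled here (hypothetical registers and CC
     names, not from the original sources): with I2 "r100 = r101 - r102" and
     I3 "flags = compare (r100, 0)", where r100 is still needed, many
     targets want a single subtract-and-set-flags insn of the form

	(parallel [(set (reg:CC flags)
			(compare:CC (minus:SI (reg:SI 101) (reg:SI 102))
				    (const_int 0)))
		   (set (reg:SI 100)
			(minus:SI (reg:SI 101) (reg:SI 102)))])

     so here we only substitute I2SRC into the COMPARE; the PARALLEL that
     also contains I2's SET is built later via ADDED_SETS_2.  */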
3160   if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3161       && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3162       && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3163       && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3164     {
3165       rtx newpat_dest;
3166       rtx *cc_use_loc = NULL;
3167       rtx_insn *cc_use_insn = NULL;
3168       rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3169       machine_mode compare_mode, orig_compare_mode;
3170       enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3171 
3172       newpat = PATTERN (i3);
3173       newpat_dest = SET_DEST (newpat);
3174       compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3175 
3176       if (undobuf.other_insn == 0
3177 	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3178 					    &cc_use_insn)))
3179 	{
3180 	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3181 	  compare_code = simplify_compare_const (compare_code,
3182 						 GET_MODE (i2dest), op0, &op1);
3183 	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3184 	}
3185 
3186       /* Do the rest only if op1 is const0_rtx, which may be the
3187 	 result of simplification.  */
3188       if (op1 == const0_rtx)
3189 	{
3190 	  /* If a single use of the CC is found, prepare to modify it
3191 	     when SELECT_CC_MODE returns a new CC-class mode, or when
3192 	     the above simplify_compare_const() returned a new comparison
3193 	     operator.  undobuf.other_insn is assigned the CC use insn
3194 	     when modifying it.  */
3195 	  if (cc_use_loc)
3196 	    {
3197 #ifdef SELECT_CC_MODE
3198 	      machine_mode new_mode
3199 		= SELECT_CC_MODE (compare_code, op0, op1);
3200 	      if (new_mode != orig_compare_mode
3201 		  && can_change_dest_mode (SET_DEST (newpat),
3202 					   added_sets_2, new_mode))
3203 		{
3204 		  unsigned int regno = REGNO (newpat_dest);
3205 		  compare_mode = new_mode;
3206 		  if (regno < FIRST_PSEUDO_REGISTER)
3207 		    newpat_dest = gen_rtx_REG (compare_mode, regno);
3208 		  else
3209 		    {
3210 		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3211 		      newpat_dest = regno_reg_rtx[regno];
3212 		    }
3213 		}
3214 #endif
3215 	      /* Cases for modifying the CC-using comparison.  */
3216 	      if (compare_code != orig_compare_code
3217 		  /* ??? Do we need to verify the zero rtx?  */
3218 		  && XEXP (*cc_use_loc, 1) == const0_rtx)
3219 		{
3220 		  /* Replace cc_use_loc with entire new RTX.  */
3221 		  SUBST (*cc_use_loc,
3222 			 gen_rtx_fmt_ee (compare_code, compare_mode,
3223 					 newpat_dest, const0_rtx));
3224 		  undobuf.other_insn = cc_use_insn;
3225 		}
3226 	      else if (compare_mode != orig_compare_mode)
3227 		{
3228 		  /* Just replace the CC reg with a new mode.  */
3229 		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3230 		  undobuf.other_insn = cc_use_insn;
3231 		}
3232 	    }
3233 
3234 	  /* Now we modify the current newpat:
3235 	     First, SET_DEST(newpat) is updated if the CC mode has been
3236 	     altered. For targets without SELECT_CC_MODE, this should be
3237 	     optimized away.  */
3238 	  if (compare_mode != orig_compare_mode)
3239 	    SUBST (SET_DEST (newpat), newpat_dest);
3240 	  /* This is always done to propagate i2src into newpat.  */
3241 	  SUBST (SET_SRC (newpat),
3242 		 gen_rtx_COMPARE (compare_mode, op0, op1));
3243 	  /* Create new version of i2pat if needed; the below PARALLEL
3244 	     creation needs this to work correctly.  */
3245 	  if (! rtx_equal_p (i2src, op0))
3246 	    i2pat = gen_rtx_SET (i2dest, op0);
3247 	  i2_is_used = 1;
3248 	}
3249     }
3250 
3251   if (i2_is_used == 0)
3252     {
3253       /* It is possible that the source of I2 or I1 may be performing
3254 	 an unneeded operation, such as a ZERO_EXTEND of something
3255 	 that is known to have the high part zero.  Handle that case
3256 	 by letting subst look at the inner insns.
3257 
3258 	 Another way to do this would be to have a function that tries
3259 	 to simplify a single insn instead of merging two or more
3260 	 insns.  We don't do this because of the potential of infinite
3261 	 loops and because of the potential extra memory required.
3262 	 However, doing it the way we are is a bit of a kludge and
3263 	 doesn't catch all cases.
3264 
3265 	 But only do this if -fexpensive-optimizations since it slows
3266 	 things down and doesn't usually win.
3267 
3268 	 This is not done in the COMPARE case above because the
3269 	 unmodified I2PAT is used in the PARALLEL and so a pattern
3270 	 with a modified I2SRC would not match.  */
3271 
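      /* A possible instance (illustrative only, not from the original
	 sources): if I2SRC is (and:SI (reg:SI 100) (const_int 255)) and
	 nonzero_bits already shows that only the low byte of reg 100 can
	 be nonzero, the subst calls below can reduce I2SRC to just
	 (reg:SI 100) before it is substituted into I3.  */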
3272       if (flag_expensive_optimizations)
3273 	{
3274 	  /* Pass pc_rtx so no substitutions are done, just
3275 	     simplifications.  */
3276 	  if (i1)
3277 	    {
3278 	      subst_low_luid = DF_INSN_LUID (i1);
3279 	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3280 	    }
3281 
3282 	  subst_low_luid = DF_INSN_LUID (i2);
3283 	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3284 	}
3285 
3286       n_occurrences = 0;		/* `subst' counts here */
3287       subst_low_luid = DF_INSN_LUID (i2);
3288 
3289       /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3290 	 copy of I2SRC each time we substitute it, in order to avoid creating
3291 	 self-referential RTL when we will be substituting I1SRC for I1DEST
3292 	 later.  Likewise if I0 feeds into I2, either directly or indirectly
3293 	 through I1, and I0DEST is in I0SRC.  */
3294       newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3295 		      (i1_feeds_i2_n && i1dest_in_i1src)
3296 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3297 			  && i0dest_in_i0src));
3298       substed_i2 = 1;
3299 
3300       /* Record whether I2's body now appears within I3's body.  */
3301       i2_is_used = n_occurrences;
3302     }
3303 
3304   /* If we already got a failure, don't try to do more.  Otherwise, try to
3305      substitute I1 if we have it.  */
3306 
3307   if (i1 && GET_CODE (newpat) != CLOBBER)
3308     {
3309       /* Check that an autoincrement side-effect on I1 has not been lost.
3310 	 This happens if I1DEST is mentioned in I2 and dies there, and
3311 	 has disappeared from the new pattern.  */
3312       if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3313 	   && i1_feeds_i2_n
3314 	   && dead_or_set_p (i2, i1dest)
3315 	   && !reg_overlap_mentioned_p (i1dest, newpat))
3316 	   /* Before we can do this substitution, we must redo the test done
3317 	      above (see detailed comments there) that ensures I1DEST isn't
3318 	      mentioned in any SETs in NEWPAT that are field assignments.  */
3319 	  || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3320 				0, 0, 0))
3321 	{
3322 	  undo_all ();
3323 	  return 0;
3324 	}
3325 
3326       n_occurrences = 0;
3327       subst_low_luid = DF_INSN_LUID (i1);
3328 
3329       /* If the following substitution will modify I1SRC, make a copy of it
3330 	 for the case where it is substituted for I1DEST in I2PAT later.  */
3331       if (added_sets_2 && i1_feeds_i2_n)
3332 	i1src_copy = copy_rtx (i1src);
3333 
3334       /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3335 	 copy of I1SRC each time we substitute it, in order to avoid creating
3336 	 self-referential RTL when we will be substituting I0SRC for I0DEST
3337 	 later.  */
3338       newpat = subst (newpat, i1dest, i1src, 0, 0,
3339 		      i0_feeds_i1_n && i0dest_in_i0src);
3340       substed_i1 = 1;
3341 
3342       /* Record whether I1's body now appears within I3's body.  */
3343       i1_is_used = n_occurrences;
3344     }
3345 
3346   /* Likewise for I0 if we have it.  */
3347 
3348   if (i0 && GET_CODE (newpat) != CLOBBER)
3349     {
3350       if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3351 	   && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3352 	       || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3353 	   && !reg_overlap_mentioned_p (i0dest, newpat))
3354 	  || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3355 				0, 0, 0))
3356 	{
3357 	  undo_all ();
3358 	  return 0;
3359 	}
3360 
3361       /* If the following substitution will modify I0SRC, make a copy of it
3362 	 for the case where it is substituted for I0DEST in I1PAT later.  */
3363       if (added_sets_1 && i0_feeds_i1_n)
3364 	i0src_copy = copy_rtx (i0src);
3365       /* And a copy for I0DEST in I2PAT substitution.  */
3366       if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3367 			   || (i0_feeds_i2_n)))
3368 	i0src_copy2 = copy_rtx (i0src);
3369 
3370       n_occurrences = 0;
3371       subst_low_luid = DF_INSN_LUID (i0);
3372       newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3373       substed_i0 = 1;
3374     }
3375 
3376   /* Fail if an autoincrement side-effect has been duplicated.  Be careful
3377      to count all the ways that I2SRC and I1SRC can be used.  */
3378   if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3379        && i2_is_used + added_sets_2 > 1)
3380       || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3381 	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3382 	      > 1))
3383       || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3384 	  && (n_occurrences + added_sets_0
3385 	      + (added_sets_1 && i0_feeds_i1_n)
3386 	      + (added_sets_2 && i0_feeds_i2_n)
3387 	      > 1))
3388       /* Fail if we tried to make a new register.  */
3389       || max_reg_num () != maxreg
3390       /* Fail if we couldn't do something and have a CLOBBER.  */
3391       || GET_CODE (newpat) == CLOBBER
3392       /* Fail if this new pattern is a MULT and we didn't have one before
3393 	 at the outer level.  */
3394       || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3395 	  && ! have_mult))
3396     {
3397       undo_all ();
3398       return 0;
3399     }
3400 
3401   /* If the actions of the earlier insns must be kept
3402      in addition to substituting them into the latest one,
3403      we must make a new PARALLEL for the latest insn
3404      to hold the additional SETs.  */
3405 
3406   if (added_sets_0 || added_sets_1 || added_sets_2)
3407     {
3408       int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3409       combine_extras++;
3410 
3411       if (GET_CODE (newpat) == PARALLEL)
3412 	{
3413 	  rtvec old = XVEC (newpat, 0);
3414 	  total_sets = XVECLEN (newpat, 0) + extra_sets;
3415 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3416 	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3417 		  sizeof (old->elem[0]) * old->num_elem);
3418 	}
3419       else
3420 	{
3421 	  rtx old = newpat;
3422 	  total_sets = 1 + extra_sets;
3423 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3424 	  XVECEXP (newpat, 0, 0) = old;
3425 	}
3426 
3427       if (added_sets_0)
3428 	XVECEXP (newpat, 0, --total_sets) = i0pat;
3429 
3430       if (added_sets_1)
3431 	{
3432 	  rtx t = i1pat;
3433 	  if (i0_feeds_i1_n)
3434 	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3435 
3436 	  XVECEXP (newpat, 0, --total_sets) = t;
3437 	}
3438       if (added_sets_2)
3439 	{
3440 	  rtx t = i2pat;
3441 	  if (i1_feeds_i2_n)
3442 	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3443 		       i0_feeds_i1_n && i0dest_in_i0src);
3444 	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3445 	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3446 
3447 	  XVECEXP (newpat, 0, --total_sets) = t;
3448 	}
3449     }
3450 
3451  validate_replacement:
3452 
3453   /* Note which hard regs this insn has as inputs.  */
3454   mark_used_regs_combine (newpat);
3455 
3456   /* If recog_for_combine fails, it strips existing clobbers.  If we'll
3457      consider splitting this pattern, we might need these clobbers.  */
3458   if (i1 && GET_CODE (newpat) == PARALLEL
3459       && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3460     {
3461       int len = XVECLEN (newpat, 0);
3462 
3463       newpat_vec_with_clobbers = rtvec_alloc (len);
3464       for (i = 0; i < len; i++)
3465 	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3466     }
3467 
3468   /* We have recognized nothing yet.  */
3469   insn_code_number = -1;
3470 
3471   /* See if this is a PARALLEL of two SETs where one SET's destination is
3472      a register that is unused and this isn't marked as an instruction that
3473      might trap in an EH region.  In that case, we just need the other SET.
3474      We prefer this over the PARALLEL.
3475 
3476      This can occur when simplifying a divmod insn.  We *must* test for this
3477      case here because the code below that splits two independent SETs doesn't
3478      handle this case correctly when it updates the register status.
3479 
3480      It's pointless doing this if we originally had two sets, one from
3481      i3, and one from i2.  Combining then splitting the parallel results
3482      in the original i2 again plus an invalid insn (which we delete).
3483      The net effect is only to move instructions around, which makes
3484      debug info less accurate.  */
3485 
3486   if (!(added_sets_2 && i1 == 0)
3487       && is_parallel_of_n_reg_sets (newpat, 2)
3488       && asm_noperands (newpat) < 0)
3489     {
3490       rtx set0 = XVECEXP (newpat, 0, 0);
3491       rtx set1 = XVECEXP (newpat, 0, 1);
3492       rtx oldpat = newpat;
3493 
3494       if (((REG_P (SET_DEST (set1))
3495 	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3496 	   || (GET_CODE (SET_DEST (set1)) == SUBREG
3497 	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3498 	  && insn_nothrow_p (i3)
3499 	  && !side_effects_p (SET_SRC (set1)))
3500 	{
3501 	  newpat = set0;
3502 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3503 	}
3504 
3505       else if (((REG_P (SET_DEST (set0))
3506 		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3507 		|| (GET_CODE (SET_DEST (set0)) == SUBREG
3508 		    && find_reg_note (i3, REG_UNUSED,
3509 				      SUBREG_REG (SET_DEST (set0)))))
3510 	       && insn_nothrow_p (i3)
3511 	       && !side_effects_p (SET_SRC (set0)))
3512 	{
3513 	  newpat = set1;
3514 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3515 
3516 	  if (insn_code_number >= 0)
3517 	    changed_i3_dest = 1;
3518 	}
3519 
3520       if (insn_code_number < 0)
3521 	newpat = oldpat;
3522     }
3523 
3524   /* Is the result of combination a valid instruction?  */
3525   if (insn_code_number < 0)
3526     insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3527 
3528   /* If we were combining three insns and the result is a simple SET
3529      with no ASM_OPERANDS that wasn't recognized, try to split it into two
3530      insns.  There are two ways to do this.  It can be split using a
3531      machine-specific method (like when you have an addition of a large
3532      constant) or by combine in the function find_split_point.  */
3533 
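  /* For example (illustrative only): if NEWPAT ended up as
     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 0x12345))) on a
     target whose add-immediate insn cannot encode that constant, the MD
     splitter consulted below may break it into two recognizable additions.  */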
3534   if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3535       && asm_noperands (newpat) < 0)
3536     {
3537       rtx parallel, *split;
3538       rtx_insn *m_split_insn;
3539 
3540       /* See if the MD file can split NEWPAT.  If it can't, see if letting it
3541 	 use I2DEST as a scratch register will help.  In the latter case,
3542 	 convert I2DEST to the mode of the source of NEWPAT if we can.  */
3543 
3544       m_split_insn = combine_split_insns (newpat, i3);
3545 
3546       /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3547 	 inputs of NEWPAT.  */
3548 
3549       /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3550 	 possible to try that as a scratch reg.  This would require adding
3551 	 more code to make it work though.  */
3552 
3553       if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3554 	{
3555 	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3556 
3557 	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
3558 	     (temporarily, until we are committed to this instruction
3559 	     combination) does not work: for example, any call to nonzero_bits
3560 	     on the register (from a splitter in the MD file, for example)
3561 	     will get the old information, which is invalid.
3562 
3563 	     Since nowadays we can create registers during combine just fine,
3564 	     we should just create a new one here, not reuse i2dest.  */
3565 
3566 	  /* First try to split using the original register as a
3567 	     scratch register.  */
3568 	  parallel = gen_rtx_PARALLEL (VOIDmode,
3569 				       gen_rtvec (2, newpat,
3570 						  gen_rtx_CLOBBER (VOIDmode,
3571 								   i2dest)));
3572 	  m_split_insn = combine_split_insns (parallel, i3);
3573 
3574 	  /* If that didn't work, try changing the mode of I2DEST if
3575 	     we can.  */
3576 	  if (m_split_insn == 0
3577 	      && new_mode != GET_MODE (i2dest)
3578 	      && new_mode != VOIDmode
3579 	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3580 	    {
3581 	      machine_mode old_mode = GET_MODE (i2dest);
3582 	      rtx ni2dest;
3583 
3584 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3585 		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3586 	      else
3587 		{
3588 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3589 		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
3590 		}
3591 
3592 	      parallel = (gen_rtx_PARALLEL
3593 			  (VOIDmode,
3594 			   gen_rtvec (2, newpat,
3595 				      gen_rtx_CLOBBER (VOIDmode,
3596 						       ni2dest))));
3597 	      m_split_insn = combine_split_insns (parallel, i3);
3598 
3599 	      if (m_split_insn == 0
3600 		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3601 		{
3602 		  struct undo *buf;
3603 
3604 		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3605 		  buf = undobuf.undos;
3606 		  undobuf.undos = buf->next;
3607 		  buf->next = undobuf.frees;
3608 		  undobuf.frees = buf;
3609 		}
3610 	    }
3611 
3612 	  i2scratch = m_split_insn != 0;
3613 	}
3614 
3615       /* If recog_for_combine has discarded clobbers, try to use them
3616 	 again for the split.  */
3617       if (m_split_insn == 0 && newpat_vec_with_clobbers)
3618 	{
3619 	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3620 	  m_split_insn = combine_split_insns (parallel, i3);
3621 	}
3622 
3623       if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3624 	{
3625 	  rtx m_split_pat = PATTERN (m_split_insn);
3626 	  insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3627 	  if (insn_code_number >= 0)
3628 	    newpat = m_split_pat;
3629 	}
3630       else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3631 	       && (next_nonnote_nondebug_insn (i2) == i3
3632 		   || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3633 	{
3634 	  rtx i2set, i3set;
3635 	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3636 	  newi2pat = PATTERN (m_split_insn);
3637 
3638 	  i3set = single_set (NEXT_INSN (m_split_insn));
3639 	  i2set = single_set (m_split_insn);
3640 
3641 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3642 
3643 	  /* If I2 or I3 has multiple SETs, we won't know how to track
3644 	     register status, so don't use these insns.  If I2's destination
3645 	     is used between I2 and I3, we also can't use these insns.  */
3646 
3647 	  if (i2_code_number >= 0 && i2set && i3set
3648 	      && (next_nonnote_nondebug_insn (i2) == i3
3649 		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3650 	    insn_code_number = recog_for_combine (&newi3pat, i3,
3651 						  &new_i3_notes);
3652 	  if (insn_code_number >= 0)
3653 	    newpat = newi3pat;
3654 
3655 	  /* It is possible that both insns now set the destination of I3.
3656 	     If so, we must show an extra use of it.  */
3657 
3658 	  if (insn_code_number >= 0)
3659 	    {
3660 	      rtx new_i3_dest = SET_DEST (i3set);
3661 	      rtx new_i2_dest = SET_DEST (i2set);
3662 
3663 	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3664 		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3665 		     || GET_CODE (new_i3_dest) == SUBREG)
3666 		new_i3_dest = XEXP (new_i3_dest, 0);
3667 
3668 	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3669 		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3670 		     || GET_CODE (new_i2_dest) == SUBREG)
3671 		new_i2_dest = XEXP (new_i2_dest, 0);
3672 
3673 	      if (REG_P (new_i3_dest)
3674 		  && REG_P (new_i2_dest)
3675 		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3676 		  && REGNO (new_i2_dest) < reg_n_sets_max)
3677 		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3678 	    }
3679 	}
3680 
3681       /* If we can split it and use I2DEST, go ahead and see if that
3682 	 helps things be recognized.  Verify that none of the registers
3683 	 are set between I2 and I3.  */
3684       if (insn_code_number < 0
3685           && (split = find_split_point (&newpat, i3, false)) != 0
3686 	  && (!HAVE_cc0 || REG_P (i2dest))
3687 	  /* We need I2DEST in the proper mode.  If it is a hard register
3688 	     or the only use of a pseudo, we can change its mode.
3689 	     Make sure we don't change a hard register to have a mode that
3690 	     isn't valid for it, or change the number of registers.  */
3691 	  && (GET_MODE (*split) == GET_MODE (i2dest)
3692 	      || GET_MODE (*split) == VOIDmode
3693 	      || can_change_dest_mode (i2dest, added_sets_2,
3694 				       GET_MODE (*split)))
3695 	  && (next_nonnote_nondebug_insn (i2) == i3
3696 	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3697 	  /* We can't overwrite I2DEST if its value is still used by
3698 	     NEWPAT.  */
3699 	  && ! reg_referenced_p (i2dest, newpat))
3700 	{
3701 	  rtx newdest = i2dest;
3702 	  enum rtx_code split_code = GET_CODE (*split);
3703 	  machine_mode split_mode = GET_MODE (*split);
3704 	  bool subst_done = false;
3705 	  newi2pat = NULL_RTX;
3706 
3707 	  i2scratch = true;
3708 
3709 	  /* *SPLIT may be part of I2SRC, so make sure we have the
3710 	     original expression around for later debug processing.
3711 	     We should not need I2SRC any more in other cases.  */
3712 	  if (MAY_HAVE_DEBUG_INSNS)
3713 	    i2src = copy_rtx (i2src);
3714 	  else
3715 	    i2src = NULL;
3716 
3717 	  /* Get NEWDEST as a register in the proper mode.  We have already
3718 	     validated that we can do this.  */
3719 	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3720 	    {
3721 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3722 		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3723 	      else
3724 		{
3725 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3726 		  newdest = regno_reg_rtx[REGNO (i2dest)];
3727 		}
3728 	    }
3729 
3730 	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3731 	     an ASHIFT.  This can occur if it was inside a PLUS and hence
3732 	     appeared to be a memory address.  This is a kludge.  */
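	  /* E.g. (illustrative only): (mult:SI (reg:SI 100) (const_int 8))
	     becomes (ashift:SI (reg:SI 100) (const_int 3)) here, since
	     exact_log2 (8) == 3.  */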
3733 	  if (split_code == MULT
3734 	      && CONST_INT_P (XEXP (*split, 1))
3735 	      && INTVAL (XEXP (*split, 1)) > 0
3736 	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3737 	    {
3738 	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
3739 					     XEXP (*split, 0), GEN_INT (i)));
3740 	      /* Update split_code because we may not have a multiply
3741 		 anymore.  */
3742 	      split_code = GET_CODE (*split);
3743 	    }
3744 
3745 	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
3746 	  if (split_code == PLUS
3747 	      && GET_CODE (XEXP (*split, 0)) == MULT
3748 	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3749 	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3750 	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3751 	    {
3752 	      rtx nsplit = XEXP (*split, 0);
3753 	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3754 					     XEXP (nsplit, 0), GEN_INT (i)));
3755 	      /* Update split_code because we may not have a multiply
3756 		 anymore.  */
3757 	      split_code = GET_CODE (*split);
3758 	    }
3759 
3760 #ifdef INSN_SCHEDULING
3761 	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3762 	     be written as a ZERO_EXTEND.  */
3763 	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3764 	    {
3765 	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3766 		 what it really is.  */
3767 	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3768 		  == SIGN_EXTEND)
3769 		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3770 						    SUBREG_REG (*split)));
3771 	      else
3772 		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3773 						    SUBREG_REG (*split)));
3774 	    }
3775 #endif
3776 
3777 	  /* Attempt to split binary operators using arithmetic identities.  */
3778 	  if (BINARY_P (SET_SRC (newpat))
3779 	      && split_mode == GET_MODE (SET_SRC (newpat))
3780 	      && ! side_effects_p (SET_SRC (newpat)))
3781 	    {
3782 	      rtx setsrc = SET_SRC (newpat);
3783 	      machine_mode mode = GET_MODE (setsrc);
3784 	      enum rtx_code code = GET_CODE (setsrc);
3785 	      rtx src_op0 = XEXP (setsrc, 0);
3786 	      rtx src_op1 = XEXP (setsrc, 1);
3787 
3788 	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
3789 	      if (rtx_equal_p (src_op0, src_op1))
3790 		{
3791 		  newi2pat = gen_rtx_SET (newdest, src_op0);
3792 		  SUBST (XEXP (setsrc, 0), newdest);
3793 		  SUBST (XEXP (setsrc, 1), newdest);
3794 		  subst_done = true;
3795 		}
3796 	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
3797 	      else if ((code == PLUS || code == MULT)
3798 		       && GET_CODE (src_op0) == code
3799 		       && GET_CODE (XEXP (src_op0, 0)) == code
3800 		       && (INTEGRAL_MODE_P (mode)
3801 			   || (FLOAT_MODE_P (mode)
3802 			       && flag_unsafe_math_optimizations)))
3803 		{
3804 		  rtx p = XEXP (XEXP (src_op0, 0), 0);
3805 		  rtx q = XEXP (XEXP (src_op0, 0), 1);
3806 		  rtx r = XEXP (src_op0, 1);
3807 		  rtx s = src_op1;
3808 
3809 		  /* Split both "((X op Y) op X) op Y" and
3810 		     "((X op Y) op Y) op X" as "T op T" where T is
3811 		     "X op Y".  */
3812 		  if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3813 		       || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3814 		    {
3815 		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3816 		      SUBST (XEXP (setsrc, 0), newdest);
3817 		      SUBST (XEXP (setsrc, 1), newdest);
3818 		      subst_done = true;
3819 		    }
3820 		  /* Split "((X op X) op Y) op Y" as "T op T" where
3821 		     T is "X op Y".  */
3822 		  else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3823 		    {
3824 		      rtx tmp = simplify_gen_binary (code, mode, p, r);
3825 		      newi2pat = gen_rtx_SET (newdest, tmp);
3826 		      SUBST (XEXP (setsrc, 0), newdest);
3827 		      SUBST (XEXP (setsrc, 1), newdest);
3828 		      subst_done = true;
3829 		    }
3830 		}
3831 	    }
3832 
3833 	  if (!subst_done)
3834 	    {
3835 	      newi2pat = gen_rtx_SET (newdest, *split);
3836 	      SUBST (*split, newdest);
3837 	    }
3838 
3839 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3840 
3841 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
3842 	     Make sure NEWPAT does not depend on the clobbered regs.  */
3843 	  if (GET_CODE (newi2pat) == PARALLEL)
3844 	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3845 	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3846 		{
3847 		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3848 		  if (reg_overlap_mentioned_p (reg, newpat))
3849 		    {
3850 		      undo_all ();
3851 		      return 0;
3852 		    }
3853 		}
3854 
3855 	  /* If the split point was a MULT and we didn't have one before,
3856 	     don't use one now.  */
3857 	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3858 	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3859 	}
3860     }
3861 
3862   /* Check for a case where we loaded from memory in a narrow mode and
3863      then sign extended it, but we need both registers.  In that case,
3864      we have a PARALLEL with both loads from the same memory location.
3865      We can split this into a load from memory followed by a register-register
3866      copy.  This saves at least one insn, more if register allocation can
3867      eliminate the copy.
3868 
3869      We cannot do this if the destination of the first assignment is a
3870      condition code register or cc0.  We eliminate this case by making sure
3871      the SET_DEST and SET_SRC have the same mode.
3872 
3873      We cannot do this if the destination of the second assignment is
3874      a register that we have already assumed is zero-extended.  Similarly
3875      for a SUBREG of such a register.  */
3876 
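  /* Sketch of the shape handled below (hypothetical registers and address,
     not from the original sources):

	(parallel [(set (reg:SI 100)
			(sign_extend:SI (mem:QI (reg:SI 102))))
		   (set (reg:QI 101) (mem:QI (reg:SI 102)))])

     is split so that the extending load becomes NEWI2PAT and the second SET
     is rewritten as a register-register copy from the low part of reg 100.  */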
3877   else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3878 	   && GET_CODE (newpat) == PARALLEL
3879 	   && XVECLEN (newpat, 0) == 2
3880 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3881 	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3882 	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3883 	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3884 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3885 	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3886 			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3887 	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3888 				   DF_INSN_LUID (i2))
3889 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3890 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3891 	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3892 		 (REG_P (temp_expr)
3893 		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3894 		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3895 		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3896 		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
3897 		      != GET_MODE_MASK (word_mode))))
3898 	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3899 		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3900 		     (REG_P (temp_expr)
3901 		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3902 		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3903 		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3904 		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
3905 			  != GET_MODE_MASK (word_mode)))))
3906 	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3907 					 SET_SRC (XVECEXP (newpat, 0, 1)))
3908 	   && ! find_reg_note (i3, REG_UNUSED,
3909 			       SET_DEST (XVECEXP (newpat, 0, 0))))
3910     {
3911       rtx ni2dest;
3912 
3913       newi2pat = XVECEXP (newpat, 0, 0);
3914       ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3915       newpat = XVECEXP (newpat, 0, 1);
3916       SUBST (SET_SRC (newpat),
3917 	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3918       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3919 
3920       if (i2_code_number >= 0)
3921 	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3922 
3923       if (insn_code_number >= 0)
3924 	swap_i2i3 = 1;
3925     }
3926 
3927   /* Similarly, check for a case where we have a PARALLEL of two independent
3928      SETs but we started with three insns.  In this case, we can do the sets
3929      as two separate insns.  This case occurs when some SET allows two
3930      other insns to combine, but the destination of that SET is still live.
3931 
3932      Also do this if we started with two insns and (at least) one of the
3933      resulting sets is a noop; this noop will be deleted later.  */
3934 
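  /* Sketch (hypothetical registers, not from the original sources): if
     combining left NEWPAT as

	(parallel [(set (reg:SI 100) (...))
		   (set (reg:SI 101) (...))])

     with the two SETs independent of each other, one SET can become the new
     I2 and the other the new I3, subject to the ordering checks below.  */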
3935   else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3936 	   && GET_CODE (newpat) == PARALLEL
3937 	   && XVECLEN (newpat, 0) == 2
3938 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3939 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3940 	   && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3941 		  || set_noop_p (XVECEXP (newpat, 0, 1)))
3942 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3943 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3944 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3945 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3946 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3947 				  XVECEXP (newpat, 0, 0))
3948 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3949 				  XVECEXP (newpat, 0, 1))
3950 	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3951 		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3952     {
3953       rtx set0 = XVECEXP (newpat, 0, 0);
3954       rtx set1 = XVECEXP (newpat, 0, 1);
3955 
3956       /* Normally, it doesn't matter which of the two is done first,
3957 	 but the one that references cc0 can't be the second, and
3958 	 one which uses any regs/memory set in between i2 and i3 can't
3959 	 be first.  The PARALLEL might also have been pre-existing in i3,
3960 	 so we need to make sure that we won't wrongly hoist a SET to i2
3961 	 that would conflict with a death note present in there.  */
3962       if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3963 	  && !(REG_P (SET_DEST (set1))
3964 	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3965 	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
3966 	       && find_reg_note (i2, REG_DEAD,
3967 				 SUBREG_REG (SET_DEST (set1))))
3968 	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
3969 	  /* If I3 is a jump, ensure that set0 is a jump so that
3970 	     we do not create invalid RTL.  */
3971 	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3972 	 )
3973 	{
3974 	  newi2pat = set1;
3975 	  newpat = set0;
3976 	}
3977       else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3978 	       && !(REG_P (SET_DEST (set0))
3979 		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3980 	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
3981 		    && find_reg_note (i2, REG_DEAD,
3982 				      SUBREG_REG (SET_DEST (set0))))
3983 	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
3984 	       /* If I3 is a jump, ensure that set1 is a jump so that
3985 		  we do not create invalid RTL.  */
3986 	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
3987 	      )
3988 	{
3989 	  newi2pat = set0;
3990 	  newpat = set1;
3991 	}
3992       else
3993 	{
3994 	  undo_all ();
3995 	  return 0;
3996 	}
3997 
3998       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3999 
4000       if (i2_code_number >= 0)
4001 	{
4002 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
4003 	     Make sure NEWPAT does not depend on the clobbered regs.  */
4004 	  if (GET_CODE (newi2pat) == PARALLEL)
4005 	    {
4006 	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4007 		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4008 		  {
4009 		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4010 		    if (reg_overlap_mentioned_p (reg, newpat))
4011 		      {
4012 			undo_all ();
4013 			return 0;
4014 		      }
4015 		  }
4016 	    }
4017 
4018 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4019 	}
4020     }
4021 
4022   /* If it still isn't recognized, fail and change things back the way they
4023      were.  */
4024   if ((insn_code_number < 0
4025        /* Is the result a reasonable ASM_OPERANDS?  */
4026        && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4027     {
4028       undo_all ();
4029       return 0;
4030     }
4031 
4032   /* If we had to change another insn, make sure it is valid also.  */
4033   if (undobuf.other_insn)
4034     {
4035       CLEAR_HARD_REG_SET (newpat_used_regs);
4036 
4037       other_pat = PATTERN (undobuf.other_insn);
4038       other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4039 					     &new_other_notes);
4040 
4041       if (other_code_number < 0 && ! check_asm_operands (other_pat))
4042 	{
4043 	  undo_all ();
4044 	  return 0;
4045 	}
4046     }
4047 
4048   /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4049      they are adjacent to each other or not.  */
4050   if (HAVE_cc0)
4051     {
4052       rtx_insn *p = prev_nonnote_insn (i3);
4053       if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4054 	  && sets_cc0_p (newi2pat))
4055 	{
4056 	  undo_all ();
4057 	  return 0;
4058 	}
4059     }
4060 
4061   /* Only allow this combination if insn_rtx_costs reports that the
4062      replacement instructions are cheaper than the originals.  */
4063   if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4064     {
4065       undo_all ();
4066       return 0;
4067     }
4068 
4069   if (MAY_HAVE_DEBUG_INSNS)
4070     {
4071       struct undo *undo;
4072 
4073       for (undo = undobuf.undos; undo; undo = undo->next)
4074 	if (undo->kind == UNDO_MODE)
4075 	  {
4076 	    rtx reg = *undo->where.r;
4077 	    machine_mode new_mode = GET_MODE (reg);
4078 	    machine_mode old_mode = undo->old_contents.m;
4079 
4080 	    /* Temporarily revert mode back.  */
4081 	    adjust_reg_mode (reg, old_mode);
4082 
4083 	    if (reg == i2dest && i2scratch)
4084 	      {
4085 		/* If we used i2dest as a scratch register with a
4086 		   different mode, substitute it for the original
4087 		   i2src while its original mode is temporarily
4088 		   restored, and then clear i2scratch so that we don't
4089 		   do it again later.  */
4090 		propagate_for_debug (i2, last_combined_insn, reg, i2src,
4091 				     this_basic_block);
4092 		i2scratch = false;
4093 		/* Put back the new mode.  */
4094 		adjust_reg_mode (reg, new_mode);
4095 	      }
4096 	    else
4097 	      {
4098 		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4099 		rtx_insn *first, *last;
4100 
4101 		if (reg == i2dest)
4102 		  {
4103 		    first = i2;
4104 		    last = last_combined_insn;
4105 		  }
4106 		else
4107 		  {
4108 		    first = i3;
4109 		    last = undobuf.other_insn;
4110 		    gcc_assert (last);
4111 		    if (DF_INSN_LUID (last)
4112 			< DF_INSN_LUID (last_combined_insn))
4113 		      last = last_combined_insn;
4114 		  }
4115 
4116 		/* We're dealing with a reg that changed mode but not
4117 		   meaning, so we want to turn it into a subreg for
4118 		   the new mode.  However, because of REG sharing and
4119 		   because its mode had already changed, we have to do
4120 		   it in two steps.  First, replace any debug uses of
4121 		   reg, with its original mode temporarily restored,
4122 		   with this copy we have created; then, replace the
4123 		   copy with the SUBREG of the original shared reg,
4124 		   once again changed to the new mode.  */
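		/* Concretely (an illustrative sketch): if reg 100 changed
		   from SImode to DImode here, debug uses of (reg:SI 100)
		   end up referring to the SImode lowpart SUBREG of the
		   DImode register after the two propagations below.  */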
4125 		propagate_for_debug (first, last, reg, tempreg,
4126 				     this_basic_block);
4127 		adjust_reg_mode (reg, new_mode);
4128 		propagate_for_debug (first, last, tempreg,
4129 				     lowpart_subreg (old_mode, reg, new_mode),
4130 				     this_basic_block);
4131 	      }
4132 	  }
4133     }
4134 
4135   /* If we will be able to accept this, we have made a
4136      change to the destination of I3.  This requires us to
4137      do a few adjustments.  */
4138 
4139   if (changed_i3_dest)
4140     {
4141       PATTERN (i3) = newpat;
4142       adjust_for_new_dest (i3);
4143     }
4144 
4145   /* We now know that we can do this combination.  Merge the insns and
4146      update the status of registers and LOG_LINKS.  */
4147 
4148   if (undobuf.other_insn)
4149     {
4150       rtx note, next;
4151 
4152       PATTERN (undobuf.other_insn) = other_pat;
4153 
4154       /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4155 	 ensure that they are still valid.  Then add any non-duplicate
4156 	 notes added by recog_for_combine.  */
4157       for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4158 	{
4159 	  next = XEXP (note, 1);
4160 
4161 	  if ((REG_NOTE_KIND (note) == REG_DEAD
4162 	       && !reg_referenced_p (XEXP (note, 0),
4163 				     PATTERN (undobuf.other_insn)))
4164 	      || (REG_NOTE_KIND (note) == REG_UNUSED
4165 		  && !reg_set_p (XEXP (note, 0),
4166 				 PATTERN (undobuf.other_insn)))
4167 	      /* Simply drop the equal note since it may no longer be valid
4168 		 for other_insn.  It may be possible to record that CC
4169 		 register is changed and only discard those notes, but
4170 		 in practice it's unnecessary complication and doesn't
4171 		 give any meaningful improvement.
4172 
4173 		 See PR78559.  */
4174 	      || REG_NOTE_KIND (note) == REG_EQUAL
4175 	      || REG_NOTE_KIND (note) == REG_EQUIV)
4176 	    remove_note (undobuf.other_insn, note);
4177 	}
4178 
4179       distribute_notes (new_other_notes, undobuf.other_insn,
4180 			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4181 			NULL_RTX);
4182     }
4183 
4184   if (swap_i2i3)
4185     {
4186       rtx_insn *insn;
4187       struct insn_link *link;
4188       rtx ni2dest;
4189 
4190       /* I3 now uses what used to be its destination and which is now
4191 	 I2's destination.  This requires us to do a few adjustments.  */
4192       PATTERN (i3) = newpat;
4193       adjust_for_new_dest (i3);
4194 
4195       /* We need a LOG_LINK from I3 to I2.  But we used to have one,
4196 	 so we still will.
4197 
4198 	 However, some later insn might be using I2's dest and have
4199 	 a LOG_LINK pointing at I3.  We must remove this link.
4200 	 The simplest way to remove the link is to point it at I1,
4201 	 which we know will be a NOTE.  */
4202 
4203       /* newi2pat is usually a SET here; however, recog_for_combine might
4204 	 have added some clobbers.  */
4205       if (GET_CODE (newi2pat) == PARALLEL)
4206 	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4207       else
4208 	ni2dest = SET_DEST (newi2pat);
4209 
4210       for (insn = NEXT_INSN (i3);
4211 	   insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4212 		    || insn != BB_HEAD (this_basic_block->next_bb));
4213 	   insn = NEXT_INSN (insn))
4214 	{
4215 	  if (NONDEBUG_INSN_P (insn)
4216 	      && reg_referenced_p (ni2dest, PATTERN (insn)))
4217 	    {
4218 	      FOR_EACH_LOG_LINK (link, insn)
4219 		if (link->insn == i3)
4220 		  link->insn = i1;
4221 
4222 	      break;
4223 	    }
4224 	}
4225     }
4226 
4227   {
4228     rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4229     struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4230     rtx midnotes = 0;
4231     int from_luid;
4232     /* Compute which registers we expect to eliminate.  newi2pat may be setting
4233        either i3dest or i2dest, so we must check it.  */
4234     rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4235 		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4236 		   || !i2dest_killed
4237 		   ? 0 : i2dest);
4238     /* For i1, we need to compute both local elimination and global
4239        elimination information with respect to newi2pat because i1dest
4240        may be the same as i3dest, in which case newi2pat may be setting
4241        i1dest.  Global information is used when distributing REG_DEAD
4242        note for i2 and i3, in which case it does matter if newi2pat sets
4243        i1dest or not.
4244 
4245        Local information is used when distributing REG_DEAD note for i1,
4246        in which case it doesn't matter if newi2pat sets i1dest or not.
4247        See PR62151, if we have four insns combination:
4248 	   i0: r0 <- i0src
4249 	   i1: r1 <- i1src (using r0)
4250 		     REG_DEAD (r0)
4251 	   i2: r0 <- i2src (using r1)
4252 	   i3: r3 <- i3src (using r0)
4253 	   ix: using r0
4254        From i1's point of view, r0 is eliminated, no matter if it is set
4255        by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
4256        should be discarded.
4257 
4258        Note local information only affects cases in forms like "I1->I2->I3",
4259        "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
4260        "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4261        i0dest anyway.  */
4262     rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4263 			 || !i1dest_killed
4264 			 ? 0 : i1dest);
4265     rtx elim_i1 = (local_elim_i1 == 0
4266 		   || (newi2pat && reg_set_p (i1dest, newi2pat))
4267 		   ? 0 : i1dest);
4268     /* Same case as i1.  */
4269     rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4270 			 ? 0 : i0dest);
4271     rtx elim_i0 = (local_elim_i0 == 0
4272 		   || (newi2pat && reg_set_p (i0dest, newi2pat))
4273 		   ? 0 : i0dest);
4274 
4275     /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4276        clear them.  */
4277     i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4278     i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4279     if (i1)
4280       i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4281     if (i0)
4282       i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4283 
4284     /* Ensure that we do not have something that should not be shared but
4285        occurs multiple times in the new insns.  Check this by first
4286        resetting all the `used' flags and then copying anything that is shared.  */
4287 
4288     reset_used_flags (i3notes);
4289     reset_used_flags (i2notes);
4290     reset_used_flags (i1notes);
4291     reset_used_flags (i0notes);
4292     reset_used_flags (newpat);
4293     reset_used_flags (newi2pat);
4294     if (undobuf.other_insn)
4295       reset_used_flags (PATTERN (undobuf.other_insn));
4296 
4297     i3notes = copy_rtx_if_shared (i3notes);
4298     i2notes = copy_rtx_if_shared (i2notes);
4299     i1notes = copy_rtx_if_shared (i1notes);
4300     i0notes = copy_rtx_if_shared (i0notes);
4301     newpat = copy_rtx_if_shared (newpat);
4302     newi2pat = copy_rtx_if_shared (newi2pat);
4303     if (undobuf.other_insn)
4304       reset_used_flags (PATTERN (undobuf.other_insn));
4305 
4306     INSN_CODE (i3) = insn_code_number;
4307     PATTERN (i3) = newpat;
4308 
4309     if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4310       {
4311 	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4312 	     link = XEXP (link, 1))
4313 	  {
4314 	    if (substed_i2)
4315 	      {
4316 		/* I2SRC must still be meaningful at this point.  Some
4317 		   splitting operations can invalidate I2SRC, but those
4318 		   operations do not apply to calls.  */
4319 		gcc_assert (i2src);
4320 		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4321 						       i2dest, i2src);
4322 	      }
4323 	    if (substed_i1)
4324 	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4325 						     i1dest, i1src);
4326 	    if (substed_i0)
4327 	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4328 						     i0dest, i0src);
4329 	  }
4330       }
4331 
4332     if (undobuf.other_insn)
4333       INSN_CODE (undobuf.other_insn) = other_code_number;
4334 
4335     /* We had one special case above where I2 had more than one set and
4336        we replaced a destination of one of those sets with the destination
4337        of I3.  In that case, we have to update LOG_LINKS of insns later
4338        in this basic block.  Note that this (expensive) case is rare.
4339 
4340        Also, in this case, we must pretend that all REG_NOTEs for I2
4341        actually came from I3, so that REG_UNUSED notes from I2 will be
4342        properly handled.  */
4343 
4344     if (i3_subst_into_i2)
4345       {
4346 	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4347 	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4348 	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4349 	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4350 	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4351 	      && ! find_reg_note (i2, REG_UNUSED,
4352 				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4353 	    for (temp_insn = NEXT_INSN (i2);
4354 		 temp_insn
4355 		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4356 		     || BB_HEAD (this_basic_block) != temp_insn);
4357 		 temp_insn = NEXT_INSN (temp_insn))
4358 	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4359 		FOR_EACH_LOG_LINK (link, temp_insn)
4360 		  if (link->insn == i2)
4361 		    link->insn = i3;
4362 
4363 	if (i3notes)
4364 	  {
4365 	    rtx link = i3notes;
4366 	    while (XEXP (link, 1))
4367 	      link = XEXP (link, 1);
4368 	    XEXP (link, 1) = i2notes;
4369 	  }
4370 	else
4371 	  i3notes = i2notes;
4372 	i2notes = 0;
4373       }
4374 
4375     LOG_LINKS (i3) = NULL;
4376     REG_NOTES (i3) = 0;
4377     LOG_LINKS (i2) = NULL;
4378     REG_NOTES (i2) = 0;
4379 
4380     if (newi2pat)
4381       {
4382 	if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4383 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4384 			       this_basic_block);
4385 	INSN_CODE (i2) = i2_code_number;
4386 	PATTERN (i2) = newi2pat;
4387       }
4388     else
4389       {
4390 	if (MAY_HAVE_DEBUG_INSNS && i2src)
4391 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4392 			       this_basic_block);
4393 	SET_INSN_DELETED (i2);
4394       }
4395 
4396     if (i1)
4397       {
4398 	LOG_LINKS (i1) = NULL;
4399 	REG_NOTES (i1) = 0;
4400 	if (MAY_HAVE_DEBUG_INSNS)
4401 	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4402 			       this_basic_block);
4403 	SET_INSN_DELETED (i1);
4404       }
4405 
4406     if (i0)
4407       {
4408 	LOG_LINKS (i0) = NULL;
4409 	REG_NOTES (i0) = 0;
4410 	if (MAY_HAVE_DEBUG_INSNS)
4411 	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4412 			       this_basic_block);
4413 	SET_INSN_DELETED (i0);
4414       }
4415 
4416     /* Get death notes for everything that is now used in either I3 or
4417        I2 and used to die in a previous insn.  If we built two new
4418        patterns, move from I1 to I2 then I2 to I3 so that we get the
4419        proper movement on registers that I2 modifies.  */
4420 
4421     if (i0)
4422       from_luid = DF_INSN_LUID (i0);
4423     else if (i1)
4424       from_luid = DF_INSN_LUID (i1);
4425     else
4426       from_luid = DF_INSN_LUID (i2);
4427     if (newi2pat)
4428       move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4429     move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4430 
4431     /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
4432     if (i3notes)
4433       distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4434 			elim_i2, elim_i1, elim_i0);
4435     if (i2notes)
4436       distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4437 			elim_i2, elim_i1, elim_i0);
4438     if (i1notes)
4439       distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4440 			elim_i2, local_elim_i1, local_elim_i0);
4441     if (i0notes)
4442       distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4443 			elim_i2, elim_i1, local_elim_i0);
4444     if (midnotes)
4445       distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4446 			elim_i2, elim_i1, elim_i0);
4447 
4448     /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
4449        know these are REG_UNUSED and want them to go to the desired insn,
4450        so we always pass it as i3.  */
4451 
4452     if (newi2pat && new_i2_notes)
4453       distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4454 			NULL_RTX);
4455 
4456     if (new_i3_notes)
4457       distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4458 			NULL_RTX);
4459 
4460     /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
4461        put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
4462        I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
4463        in that case, it might delete I2.  Similarly for I2 and I1.
4464        Show an additional death due to the REG_DEAD note we make here.  If
4465        we discard it in distribute_notes, we will decrement it again.  */
4466 
4467     if (i3dest_killed)
4468       {
4469 	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4470 	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4471 	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4472 			    elim_i1, elim_i0);
4473 	else
4474 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4475 			    elim_i2, elim_i1, elim_i0);
4476       }
4477 
4478     if (i2dest_in_i2src)
4479       {
4480 	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4481 	if (newi2pat && reg_set_p (i2dest, newi2pat))
4482 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4483 			    NULL_RTX, NULL_RTX);
4484 	else
4485 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4486 			    NULL_RTX, NULL_RTX, NULL_RTX);
4487       }
4488 
4489     if (i1dest_in_i1src)
4490       {
4491 	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4492 	if (newi2pat && reg_set_p (i1dest, newi2pat))
4493 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4494 			    NULL_RTX, NULL_RTX);
4495 	else
4496 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4497 			    NULL_RTX, NULL_RTX, NULL_RTX);
4498       }
4499 
4500     if (i0dest_in_i0src)
4501       {
4502 	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4503 	if (newi2pat && reg_set_p (i0dest, newi2pat))
4504 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4505 			    NULL_RTX, NULL_RTX);
4506 	else
4507 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4508 			    NULL_RTX, NULL_RTX, NULL_RTX);
4509       }
4510 
4511     distribute_links (i3links);
4512     distribute_links (i2links);
4513     distribute_links (i1links);
4514     distribute_links (i0links);
4515 
4516     if (REG_P (i2dest))
4517       {
4518 	struct insn_link *link;
4519 	rtx_insn *i2_insn = 0;
4520 	rtx i2_val = 0, set;
4521 
4522 	/* The insn that used to set this register doesn't exist, and
4523 	   this life of the register may not exist either.  See if one of
4524 	   I3's links points to an insn that sets I2DEST.  If it does,
4525 	   that is now the last known value for I2DEST. If we don't update
4526 	   this and I2 set the register to a value that depended on its old
4527 	   contents, we will get confused.  If this insn is used, things
4528 	   will be set correctly in combine_instructions.  */
4529 	FOR_EACH_LOG_LINK (link, i3)
4530 	  if ((set = single_set (link->insn)) != 0
4531 	      && rtx_equal_p (i2dest, SET_DEST (set)))
4532 	    i2_insn = link->insn, i2_val = SET_SRC (set);
4533 
4534 	record_value_for_reg (i2dest, i2_insn, i2_val);
4535 
4536 	/* If the reg formerly set in I2 died only once and that was in I3,
4537 	   zero its use count so it won't make `reload' do any work.  */
4538 	if (! added_sets_2
4539 	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4540 	    && ! i2dest_in_i2src
4541 	    && REGNO (i2dest) < reg_n_sets_max)
4542 	  INC_REG_N_SETS (REGNO (i2dest), -1);
4543       }
4544 
4545     if (i1 && REG_P (i1dest))
4546       {
4547 	struct insn_link *link;
4548 	rtx_insn *i1_insn = 0;
4549 	rtx i1_val = 0, set;
4550 
4551 	FOR_EACH_LOG_LINK (link, i3)
4552 	  if ((set = single_set (link->insn)) != 0
4553 	      && rtx_equal_p (i1dest, SET_DEST (set)))
4554 	    i1_insn = link->insn, i1_val = SET_SRC (set);
4555 
4556 	record_value_for_reg (i1dest, i1_insn, i1_val);
4557 
4558 	if (! added_sets_1
4559 	    && ! i1dest_in_i1src
4560 	    && REGNO (i1dest) < reg_n_sets_max)
4561 	  INC_REG_N_SETS (REGNO (i1dest), -1);
4562       }
4563 
4564     if (i0 && REG_P (i0dest))
4565       {
4566 	struct insn_link *link;
4567 	rtx_insn *i0_insn = 0;
4568 	rtx i0_val = 0, set;
4569 
4570 	FOR_EACH_LOG_LINK (link, i3)
4571 	  if ((set = single_set (link->insn)) != 0
4572 	      && rtx_equal_p (i0dest, SET_DEST (set)))
4573 	    i0_insn = link->insn, i0_val = SET_SRC (set);
4574 
4575 	record_value_for_reg (i0dest, i0_insn, i0_val);
4576 
4577 	if (! added_sets_0
4578 	    && ! i0dest_in_i0src
4579 	    && REGNO (i0dest) < reg_n_sets_max)
4580 	  INC_REG_N_SETS (REGNO (i0dest), -1);
4581       }
4582 
4583     /* Update reg_stat[].nonzero_bits et al for any changes that may have
4584        been made to this insn.  The order is important, because newi2pat
4585        can affect nonzero_bits of newpat.  */
4586     if (newi2pat)
4587       note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4588     note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4589   }
4590 
4591   if (undobuf.other_insn != NULL_RTX)
4592     {
4593       if (dump_file)
4594 	{
4595 	  fprintf (dump_file, "modifying other_insn ");
4596 	  dump_insn_slim (dump_file, undobuf.other_insn);
4597 	}
4598       df_insn_rescan (undobuf.other_insn);
4599     }
4600 
4601   if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4602     {
4603       if (dump_file)
4604 	{
4605 	  fprintf (dump_file, "modifying insn i0 ");
4606 	  dump_insn_slim (dump_file, i0);
4607 	}
4608       df_insn_rescan (i0);
4609     }
4610 
4611   if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4612     {
4613       if (dump_file)
4614 	{
4615 	  fprintf (dump_file, "modifying insn i1 ");
4616 	  dump_insn_slim (dump_file, i1);
4617 	}
4618       df_insn_rescan (i1);
4619     }
4620 
4621   if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4622     {
4623       if (dump_file)
4624 	{
4625 	  fprintf (dump_file, "modifying insn i2 ");
4626 	  dump_insn_slim (dump_file, i2);
4627 	}
4628       df_insn_rescan (i2);
4629     }
4630 
4631   if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4632     {
4633       if (dump_file)
4634 	{
4635 	  fprintf (dump_file, "modifying insn i3 ");
4636 	  dump_insn_slim (dump_file, i3);
4637 	}
4638       df_insn_rescan (i3);
4639     }
4640 
4641   /* Set new_direct_jump_p if a new return or simple jump instruction
4642      has been created.  Adjust the CFG accordingly.  */
4643   if (returnjump_p (i3) || any_uncondjump_p (i3))
4644     {
4645       *new_direct_jump_p = 1;
4646       mark_jump_label (PATTERN (i3), i3, 0);
4647       update_cfg_for_uncondjump (i3);
4648     }
4649 
4650   if (undobuf.other_insn != NULL_RTX
4651       && (returnjump_p (undobuf.other_insn)
4652 	  || any_uncondjump_p (undobuf.other_insn)))
4653     {
4654       *new_direct_jump_p = 1;
4655       update_cfg_for_uncondjump (undobuf.other_insn);
4656     }
4657 
4658   if (GET_CODE (PATTERN (i3)) == TRAP_IF
4659       && XEXP (PATTERN (i3), 0) == const1_rtx)
4660     {
4661       basic_block bb = BLOCK_FOR_INSN (i3);
4662       gcc_assert (bb);
4663       remove_edge (split_block (bb, i3));
4664       emit_barrier_after_bb (bb);
4665       *new_direct_jump_p = 1;
4666     }
4667 
4668   if (undobuf.other_insn
4669       && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4670       && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4671     {
4672       basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4673       gcc_assert (bb);
4674       remove_edge (split_block (bb, undobuf.other_insn));
4675       emit_barrier_after_bb (bb);
4676       *new_direct_jump_p = 1;
4677     }
4678 
4679   /* A jump that has been simplified to a no-op (set (pc) (pc)) might
4680      also need the CFG cleaned up.  */
4681   if (JUMP_P (i3)
4682       && GET_CODE (newpat) == SET
4683       && SET_SRC (newpat) == pc_rtx
4684       && SET_DEST (newpat) == pc_rtx)
4685     {
4686       *new_direct_jump_p = 1;
4687       update_cfg_for_uncondjump (i3);
4688     }
4689 
4690   if (undobuf.other_insn != NULL_RTX
4691       && JUMP_P (undobuf.other_insn)
4692       && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4693       && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4694       && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4695     {
4696       *new_direct_jump_p = 1;
4697       update_cfg_for_uncondjump (undobuf.other_insn);
4698     }
4699 
4700   combine_successes++;
4701   undo_commit ();
4702 
4703   if (added_links_insn
4704       && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4705       && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4706     return added_links_insn;
4707   else
4708     return newi2pat ? i2 : i3;
4709 }
4710 
4711 /* Get a marker for undoing to the current state.  */
4712 
4713 static void *
4714 get_undo_marker (void)
4715 {
4716   return undobuf.undos;
4717 }
4718 
4719 /* Undo the modifications up to the marker.  */
4720 
4721 static void
4722 undo_to_marker (void *marker)
4723 {
4724   struct undo *undo, *next;
4725 
4726   for (undo = undobuf.undos; undo != marker; undo = next)
4727     {
4728       gcc_assert (undo);
4729 
4730       next = undo->next;
4731       switch (undo->kind)
4732 	{
4733 	case UNDO_RTX:
4734 	  *undo->where.r = undo->old_contents.r;
4735 	  break;
4736 	case UNDO_INT:
4737 	  *undo->where.i = undo->old_contents.i;
4738 	  break;
4739 	case UNDO_MODE:
4740 	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4741 	  break;
4742 	case UNDO_LINKS:
4743 	  *undo->where.l = undo->old_contents.l;
4744 	  break;
4745 	default:
4746 	  gcc_unreachable ();
4747 	}
4748 
4749       undo->next = undobuf.frees;
4750       undobuf.frees = undo;
4751     }
4752 
4753   undobuf.undos = (struct undo *) marker;
4754 }
4755 
4756 /* Undo all the modifications recorded in undobuf.  */
4757 
4758 static void
4759 undo_all (void)
4760 {
4761   undo_to_marker (0);
4762 }
4763 
4764 /* We've committed to accepting the changes we made.  Move all
4765    of the undos to the free list.  */
4766 
4767 static void
4768 undo_commit (void)
4769 {
4770   struct undo *undo, *next;
4771 
4772   for (undo = undobuf.undos; undo; undo = next)
4773     {
4774       next = undo->next;
4775       undo->next = undobuf.frees;
4776       undobuf.frees = undo;
4777     }
4778   undobuf.undos = 0;
4779 }
4780 
4781 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4782    where we have an arithmetic expression and return that point.  LOC will
4783    be inside INSN.
4784 
4785    try_combine will call this function to see if an insn can be split into
4786    two insns.  */
4787 
4788 static rtx *
4789 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4790 {
4791   rtx x = *loc;
4792   enum rtx_code code = GET_CODE (x);
4793   rtx *split;
4794   unsigned HOST_WIDE_INT len = 0;
4795   HOST_WIDE_INT pos = 0;
4796   int unsignedp = 0;
4797   rtx inner = NULL_RTX;
4798 
4799   /* First special-case some codes.  */
4800   switch (code)
4801     {
4802     case SUBREG:
4803 #ifdef INSN_SCHEDULING
4804       /* If we are making a paradoxical SUBREG invalid, it becomes a split
4805 	 point.  */
4806       if (MEM_P (SUBREG_REG (x)))
4807 	return loc;
4808 #endif
4809       return find_split_point (&SUBREG_REG (x), insn, false);
4810 
4811     case MEM:
4812       /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4813 	 using LO_SUM and HIGH.  */
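      /* For instance, (mem (symbol_ref "foo")) becomes
	 (mem (lo_sum (high (symbol_ref "foo")) (symbol_ref "foo"))), and
	 the HIGH term is what is returned as the split point.  */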
4814       if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4815 			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4816 	{
4817 	  machine_mode address_mode = get_address_mode (x);
4818 
4819 	  SUBST (XEXP (x, 0),
4820 		 gen_rtx_LO_SUM (address_mode,
4821 				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4822 				 XEXP (x, 0)));
4823 	  return &XEXP (XEXP (x, 0), 0);
4824 	}
4825 
4826       /* If we have a PLUS whose second operand is a constant and the
4827 	 address is not valid, perhaps we can split it up using
4828 	 the machine-specific way to split large constants.  We use
4829 	 the first pseudo-reg (one of the virtual regs) as a placeholder;
4830 	 it will not remain in the result.  */
4831       if (GET_CODE (XEXP (x, 0)) == PLUS
4832 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4833 	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4834 					    MEM_ADDR_SPACE (x)))
4835 	{
4836 	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4837 	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4838 					       subst_insn);
4839 
4840 	  /* This should have produced two insns, each of which sets our
4841 	     placeholder.  If the source of the second is a valid address,
4842 	     we can put both sources together and make a split point
4843 	     in the middle.  */
4844 
4845 	  if (seq
4846 	      && NEXT_INSN (seq) != NULL_RTX
4847 	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4848 	      && NONJUMP_INSN_P (seq)
4849 	      && GET_CODE (PATTERN (seq)) == SET
4850 	      && SET_DEST (PATTERN (seq)) == reg
4851 	      && ! reg_mentioned_p (reg,
4852 				    SET_SRC (PATTERN (seq)))
4853 	      && NONJUMP_INSN_P (NEXT_INSN (seq))
4854 	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4855 	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4856 	      && memory_address_addr_space_p
4857 		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4858 		    MEM_ADDR_SPACE (x)))
4859 	    {
4860 	      rtx src1 = SET_SRC (PATTERN (seq));
4861 	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4862 
4863 	      /* Replace the placeholder in SRC2 with SRC1.  If we can
4864 		 find where in SRC2 it was placed, that can become our
4865 		 split point and we can replace this address with SRC2.
4866 		 Just try two obvious places.  */
4867 
4868 	      src2 = replace_rtx (src2, reg, src1);
4869 	      split = 0;
4870 	      if (XEXP (src2, 0) == src1)
4871 		split = &XEXP (src2, 0);
4872 	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4873 		       && XEXP (XEXP (src2, 0), 0) == src1)
4874 		split = &XEXP (XEXP (src2, 0), 0);
4875 
4876 	      if (split)
4877 		{
4878 		  SUBST (XEXP (x, 0), src2);
4879 		  return split;
4880 		}
4881 	    }
4882 
4883 	  /* If that didn't work, perhaps the first operand is complex and
4884 	     needs to be computed separately, so make a split point there.
4885 	     This will occur on machines that just support REG + CONST
4886 	     and have a constant moved through some previous computation.  */
4887 
4888 	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4889 		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4890 			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4891 	    return &XEXP (XEXP (x, 0), 0);
4892 	}
4893 
4894       /* If we have a PLUS whose first operand is complex, try computing it
4895          separately by making a split there.  */
4896       if (GET_CODE (XEXP (x, 0)) == PLUS
4897           && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4898 					    MEM_ADDR_SPACE (x))
4899           && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4900           && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4901                 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4902         return &XEXP (XEXP (x, 0), 0);
4903       break;
4904 
4905     case SET:
4906       /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4907 	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4908 	 we need to put the operand into a register.  So split at that
4909 	 point.  */
4910 
4911       if (SET_DEST (x) == cc0_rtx
4912 	  && GET_CODE (SET_SRC (x)) != COMPARE
4913 	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4914 	  && !OBJECT_P (SET_SRC (x))
4915 	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
4916 		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4917 	return &SET_SRC (x);
4918 
4919       /* See if we can split SET_SRC as it stands.  */
4920       split = find_split_point (&SET_SRC (x), insn, true);
4921       if (split && split != &SET_SRC (x))
4922 	return split;
4923 
4924       /* See if we can split SET_DEST as it stands.  */
4925       split = find_split_point (&SET_DEST (x), insn, false);
4926       if (split && split != &SET_DEST (x))
4927 	return split;
4928 
4929       /* See if this is a bitfield assignment with everything constant.  If
4930 	 so, this is an IOR of an AND, so split it into that.  */
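      /* For instance, with BITS_BIG_ENDIAN zero,
	 (set (zero_extract X 8 8) (const_int 0x12)) is rewritten as
	 (set X (ior (and X (const_int -65281)) (const_int 0x1200))),
	 i.e. the field is cleared with an AND mask and the new bits are
	 ORed in.  */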
4931       if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4932 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4933 	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
4934 	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
4935 	  && CONST_INT_P (SET_SRC (x))
4936 	  && ((INTVAL (XEXP (SET_DEST (x), 1))
4937 	       + INTVAL (XEXP (SET_DEST (x), 2)))
4938 	      <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4939 	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4940 	{
4941 	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4942 	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4943 	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4944 	  rtx dest = XEXP (SET_DEST (x), 0);
4945 	  machine_mode mode = GET_MODE (dest);
4946 	  unsigned HOST_WIDE_INT mask
4947 	    = (HOST_WIDE_INT_1U << len) - 1;
4948 	  rtx or_mask;
4949 
4950 	  if (BITS_BIG_ENDIAN)
4951 	    pos = GET_MODE_PRECISION (mode) - len - pos;
4952 
4953 	  or_mask = gen_int_mode (src << pos, mode);
4954 	  if (src == mask)
4955 	    SUBST (SET_SRC (x),
4956 		   simplify_gen_binary (IOR, mode, dest, or_mask));
4957 	  else
4958 	    {
4959 	      rtx negmask = gen_int_mode (~(mask << pos), mode);
4960 	      SUBST (SET_SRC (x),
4961 		     simplify_gen_binary (IOR, mode,
4962 					  simplify_gen_binary (AND, mode,
4963 							       dest, negmask),
4964 					  or_mask));
4965 	    }
4966 
4967 	  SUBST (SET_DEST (x), dest);
4968 
4969 	  split = find_split_point (&SET_SRC (x), insn, true);
4970 	  if (split && split != &SET_SRC (x))
4971 	    return split;
4972 	}
4973 
4974       /* Otherwise, see if this is an operation that we can split into two.
4975 	 If so, try to split that.  */
4976       code = GET_CODE (SET_SRC (x));
4977 
4978       switch (code)
4979 	{
4980 	case AND:
4981 	  /* If we are AND'ing with a large constant that is only a single
4982 	     bit and the result is only being used in a context where we
4983 	     need to know if it is zero or nonzero, replace it with a bit
4984 	     extraction.  This will avoid the large constant, which might
4985 	     have taken more than one insn to make.  If the constant were
4986 	     not a valid argument to the AND but took only one insn to make,
4987 	     this is no worse, but if it took more than one insn, it will
4988 	     be better.  */
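	  /* For instance, if (set Y (and X (const_int 1048576))) feeds only
	     (ne Y (const_int 0)), the AND can be replaced by an extraction
	     of bit 20 of X (e.g. a one-bit zero_extract), avoiding the
	     0x100000 constant.  */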
4989 
4990 	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4991 	      && REG_P (XEXP (SET_SRC (x), 0))
4992 	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4993 	      && REG_P (SET_DEST (x))
4994 	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
4995 	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4996 	      && XEXP (*split, 0) == SET_DEST (x)
4997 	      && XEXP (*split, 1) == const0_rtx)
4998 	    {
4999 	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5000 						XEXP (SET_SRC (x), 0),
5001 						pos, NULL_RTX, 1, 1, 0, 0);
5002 	      if (extraction != 0)
5003 		{
5004 		  SUBST (SET_SRC (x), extraction);
5005 		  return find_split_point (loc, insn, false);
5006 		}
5007 	    }
5008 	  break;
5009 
5010 	case NE:
5011 	  /* If STORE_FLAG_VALUE is -1 and this is (NE X 0) where only one bit
5012 	     of X can be nonzero, it can be converted into a NEG of a shift.  */
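	  /* For instance, if bit 3 is the only bit of X that can be nonzero,
	     (ne X (const_int 0)) becomes (neg (lshiftrt X (const_int 3))),
	     which yields 0 or -1 as required.  */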
5013 	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5014 	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5015 	      && 1 <= (pos = exact_log2
5016 		       (nonzero_bits (XEXP (SET_SRC (x), 0),
5017 				      GET_MODE (XEXP (SET_SRC (x), 0))))))
5018 	    {
5019 	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5020 
5021 	      SUBST (SET_SRC (x),
5022 		     gen_rtx_NEG (mode,
5023 				  gen_rtx_LSHIFTRT (mode,
5024 						    XEXP (SET_SRC (x), 0),
5025 						    GEN_INT (pos))));
5026 
5027 	      split = find_split_point (&SET_SRC (x), insn, true);
5028 	      if (split && split != &SET_SRC (x))
5029 		return split;
5030 	    }
5031 	  break;
5032 
5033 	case SIGN_EXTEND:
5034 	  inner = XEXP (SET_SRC (x), 0);
5035 
5036 	  /* We can't optimize if either mode is a partial integer
5037 	     mode as we don't know how many bits are significant
5038 	     in those modes.  */
5039 	  if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
5040 	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5041 	    break;
5042 
5043 	  pos = 0;
5044 	  len = GET_MODE_PRECISION (GET_MODE (inner));
5045 	  unsignedp = 0;
5046 	  break;
5047 
5048 	case SIGN_EXTRACT:
5049 	case ZERO_EXTRACT:
5050 	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5051 	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5052 	    {
5053 	      inner = XEXP (SET_SRC (x), 0);
5054 	      len = INTVAL (XEXP (SET_SRC (x), 1));
5055 	      pos = INTVAL (XEXP (SET_SRC (x), 2));
5056 
5057 	      if (BITS_BIG_ENDIAN)
5058 		pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
5059 	      unsignedp = (code == ZERO_EXTRACT);
5060 	    }
5061 	  break;
5062 
5063 	default:
5064 	  break;
5065 	}
5066 
5067       if (len && pos >= 0
5068 	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
5069 	{
5070 	  machine_mode mode = GET_MODE (SET_SRC (x));
5071 
5072 	  /* For unsigned, we have a choice of a shift followed by an
5073 	     AND or two shifts.  Use two shifts for field sizes where the
5074 	     constant might be too large.  We assume here that we can
5075 	     always at least get 8-bit constants in an AND insn, which is
5076 	     true for every current RISC.  */
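	  /* For instance, extracting an unsigned 5-bit field at bit 3 can
	     become (and (lshiftrt X (const_int 3)) (const_int 31)); the
	     signed case uses the ashift/ashiftrt pair below.  */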
5077 
5078 	  if (unsignedp && len <= 8)
5079 	    {
5080 	      unsigned HOST_WIDE_INT mask
5081 		= (HOST_WIDE_INT_1U << len) - 1;
5082 	      SUBST (SET_SRC (x),
5083 		     gen_rtx_AND (mode,
5084 				  gen_rtx_LSHIFTRT
5085 				  (mode, gen_lowpart (mode, inner),
5086 				   GEN_INT (pos)),
5087 				  gen_int_mode (mask, mode)));
5088 
5089 	      split = find_split_point (&SET_SRC (x), insn, true);
5090 	      if (split && split != &SET_SRC (x))
5091 		return split;
5092 	    }
5093 	  else
5094 	    {
5095 	      SUBST (SET_SRC (x),
5096 		     gen_rtx_fmt_ee
5097 		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5098 		      gen_rtx_ASHIFT (mode,
5099 				      gen_lowpart (mode, inner),
5100 				      GEN_INT (GET_MODE_PRECISION (mode)
5101 					       - len - pos)),
5102 		      GEN_INT (GET_MODE_PRECISION (mode) - len)));
5103 
5104 	      split = find_split_point (&SET_SRC (x), insn, true);
5105 	      if (split && split != &SET_SRC (x))
5106 		return split;
5107 	    }
5108 	}
5109 
5110       /* See if this is a simple operation with a constant as the second
5111 	 operand.  It might be that this constant is out of range and hence
5112 	 could be used as a split point.  */
5113       if (BINARY_P (SET_SRC (x))
5114 	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
5115 	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
5116 	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5117 		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5118 	return &XEXP (SET_SRC (x), 1);
5119 
5120       /* Finally, see if this is a simple operation with its first operand
5121 	 not in a register.  The operation might require this operand in a
5122 	 register, so return it as a split point.  We can always do this
5123 	 because if the first operand were another operation, we would have
5124 	 already found it as a split point.  */
5125       if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5126 	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5127 	return &XEXP (SET_SRC (x), 0);
5128 
5129       return 0;
5130 
5131     case AND:
5132     case IOR:
5133       /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5134 	 it is better to write this as (not (ior A B)) so we can split it.
5135 	 Similarly for IOR.  */
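      /* That is, by De Morgan's law (and (not A) (not B)) becomes
	 (not (ior A B)) and (ior (not A) (not B)) becomes (not (and A B)).  */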
5136       if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5137 	{
5138 	  SUBST (*loc,
5139 		 gen_rtx_NOT (GET_MODE (x),
5140 			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5141 					      GET_MODE (x),
5142 					      XEXP (XEXP (x, 0), 0),
5143 					      XEXP (XEXP (x, 1), 0))));
5144 	  return find_split_point (loc, insn, set_src);
5145 	}
5146 
5147       /* Many RISC machines have a large set of logical insns.  If the
5148 	 second operand is a NOT, put it first so we will try to split the
5149 	 other operand first.  */
5150       if (GET_CODE (XEXP (x, 1)) == NOT)
5151 	{
5152 	  rtx tem = XEXP (x, 0);
5153 	  SUBST (XEXP (x, 0), XEXP (x, 1));
5154 	  SUBST (XEXP (x, 1), tem);
5155 	}
5156       break;
5157 
5158     case PLUS:
5159     case MINUS:
5160       /* Canonicalization can produce (minus A (mult B C)), where C is a
5161 	 constant.  It may be better to try splitting (plus (mult B -C) A)
5162 	 instead if this isn't a multiply by a power of two.  */
5163       if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5164 	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5165 	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5166 	{
5167 	  machine_mode mode = GET_MODE (x);
5168 	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5169 	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5170 	  SUBST (*loc, gen_rtx_PLUS (mode,
5171 				     gen_rtx_MULT (mode,
5172 						   XEXP (XEXP (x, 1), 0),
5173 						   gen_int_mode (other_int,
5174 								 mode)),
5175 				     XEXP (x, 0)));
5176 	  return find_split_point (loc, insn, set_src);
5177 	}
5178 
5179       /* Split at a multiply-accumulate instruction.  However if this is
5180          the SET_SRC, we likely do not have such an instruction and it's
5181          worthless to try this split.  */
5182       if (!set_src
5183 	  && (GET_CODE (XEXP (x, 0)) == MULT
5184 	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
5185 		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5186         return loc;
5187 
5188     default:
5189       break;
5190     }
5191 
5192   /* Otherwise, select our actions depending on our rtx class.  */
5193   switch (GET_RTX_CLASS (code))
5194     {
5195     case RTX_BITFIELD_OPS:		/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
5196     case RTX_TERNARY:
5197       split = find_split_point (&XEXP (x, 2), insn, false);
5198       if (split)
5199 	return split;
5200       /* fall through */
5201     case RTX_BIN_ARITH:
5202     case RTX_COMM_ARITH:
5203     case RTX_COMPARE:
5204     case RTX_COMM_COMPARE:
5205       split = find_split_point (&XEXP (x, 1), insn, false);
5206       if (split)
5207 	return split;
5208       /* fall through */
5209     case RTX_UNARY:
5210       /* Some machines have (and (shift ...) ...) insns.  If X is not
5211 	 an AND, but XEXP (X, 0) is, use it as our split point.  */
5212       if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5213 	return &XEXP (x, 0);
5214 
5215       split = find_split_point (&XEXP (x, 0), insn, false);
5216       if (split)
5217 	return split;
5218       return loc;
5219 
5220     default:
5221       /* Otherwise, we don't have a split point.  */
5222       return 0;
5223     }
5224 }
5225 
5226 /* Throughout X, replace FROM with TO, and return the result.
5227    The result is TO if X is FROM;
5228    otherwise the result is X, but its contents may have been modified.
5229    If they were modified, a record was made in undobuf so that
5230    undo_all will (among other things) return X to its original state.
5231 
5232    If the number of changes necessary is too great to record for undoing,
5233    the excess changes are not made, so the result is invalid.
5234    The changes already made can still be undone.
5235    undobuf.num_undo is incremented for such changes, so by testing that,
5236    the caller can tell whether the result is valid.
5237 
5238    `n_occurrences' is incremented each time FROM is replaced.
5239 
5240    IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5241 
5242    IN_COND is nonzero if we are at the top level of a condition.
5243 
5244    UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
5245    by copying if `n_occurrences' is nonzero.  */
5246 
5247 static rtx
5248 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5249 {
5250   enum rtx_code code = GET_CODE (x);
5251   machine_mode op0_mode = VOIDmode;
5252   const char *fmt;
5253   int len, i;
5254   rtx new_rtx;
5255 
5256 /* Two expressions are equal if they are identical copies of a shared
5257    RTX or if they are both registers with the same register number
5258    and mode.  */
5259 
5260 #define COMBINE_RTX_EQUAL_P(X,Y)			\
5261   ((X) == (Y)						\
5262    || (REG_P (X) && REG_P (Y)	\
5263        && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5264 
5265   /* Do not substitute into clobbers of regs -- this will never result in
5266      valid RTL.  */
5267   if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5268     return x;
5269 
5270   if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5271     {
5272       n_occurrences++;
5273       return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5274     }
5275 
5276   /* If X and FROM are the same register but different modes, they
5277      will not have been seen as equal above.  However, the log links code
5278      will make a LOG_LINKS entry for that case.  If we do nothing, we
5279      will try to rerecognize our original insn and, when it succeeds,
5280      we will delete the feeding insn, which is incorrect.
5281 
5282      So force this insn not to match in this (rare) case.  */
5283   if (! in_dest && code == REG && REG_P (from)
5284       && reg_overlap_mentioned_p (x, from))
5285     return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5286 
5287   /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5288      of which may contain things that can be combined.  */
5289   if (code != MEM && code != LO_SUM && OBJECT_P (x))
5290     return x;
5291 
5292   /* It is possible to have a subexpression appear twice in the insn.
5293      Suppose that FROM is a register that appears within TO.
5294      Then, after that subexpression has been scanned once by `subst',
5295      the second time it is scanned, TO may be found.  If we were
5296      to scan TO here, we would find FROM within it and create a
5297      self-referential rtl structure, which is completely wrong.  */
5298   if (COMBINE_RTX_EQUAL_P (x, to))
5299     return to;
5300 
5301   /* Parallel asm_operands need special attention because all of the
5302      inputs are shared across the arms.  Furthermore, unsharing the
5303      rtl results in recognition failures.  Failure to handle this case
5304      specially can result in circular rtl.
5305 
5306      Solve this by doing a normal pass across the first entry of the
5307      parallel, and only processing the SET_DESTs of the subsequent
5308      entries.  Ug.  */
5309 
5310   if (code == PARALLEL
5311       && GET_CODE (XVECEXP (x, 0, 0)) == SET
5312       && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5313     {
5314       new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5315 
5316       /* If this substitution failed, this whole thing fails.  */
5317       if (GET_CODE (new_rtx) == CLOBBER
5318 	  && XEXP (new_rtx, 0) == const0_rtx)
5319 	return new_rtx;
5320 
5321       SUBST (XVECEXP (x, 0, 0), new_rtx);
5322 
5323       for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5324 	{
5325 	  rtx dest = SET_DEST (XVECEXP (x, 0, i));
5326 
5327 	  if (!REG_P (dest)
5328 	      && GET_CODE (dest) != CC0
5329 	      && GET_CODE (dest) != PC)
5330 	    {
5331 	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5332 
5333 	      /* If this substitution failed, this whole thing fails.  */
5334 	      if (GET_CODE (new_rtx) == CLOBBER
5335 		  && XEXP (new_rtx, 0) == const0_rtx)
5336 		return new_rtx;
5337 
5338 	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5339 	    }
5340 	}
5341     }
5342   else
5343     {
5344       len = GET_RTX_LENGTH (code);
5345       fmt = GET_RTX_FORMAT (code);
5346 
5347       /* We don't need to process a SET_DEST that is a register, CC0,
5348 	 or PC, so set up to skip this common case.  All other cases
5349 	 where we want to suppress replacing something inside a
5350 	 SET_SRC are handled via the IN_DEST operand.  */
5351       if (code == SET
5352 	  && (REG_P (SET_DEST (x))
5353 	      || GET_CODE (SET_DEST (x)) == CC0
5354 	      || GET_CODE (SET_DEST (x)) == PC))
5355 	fmt = "ie";
5356 
5357       /* Trying to simplify the operands of a widening MULT is not likely
5358 	 to create RTL matching a machine insn.  */
5359       if (code == MULT
5360 	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5361 	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5362 	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5363 	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5364 	  && REG_P (XEXP (XEXP (x, 0), 0))
5365 	  && REG_P (XEXP (XEXP (x, 1), 0))
5366 	  && from == to)
5367 	return x;
5368 
5369 
5370       /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5371 	 constant.  */
5372       if (fmt[0] == 'e')
5373 	op0_mode = GET_MODE (XEXP (x, 0));
5374 
5375       for (i = 0; i < len; i++)
5376 	{
5377 	  if (fmt[i] == 'E')
5378 	    {
5379 	      int j;
5380 	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5381 		{
5382 		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5383 		    {
5384 		      new_rtx = (unique_copy && n_occurrences
5385 			     ? copy_rtx (to) : to);
5386 		      n_occurrences++;
5387 		    }
5388 		  else
5389 		    {
5390 		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5391 				       unique_copy);
5392 
5393 		      /* If this substitution failed, this whole thing
5394 			 fails.  */
5395 		      if (GET_CODE (new_rtx) == CLOBBER
5396 			  && XEXP (new_rtx, 0) == const0_rtx)
5397 			return new_rtx;
5398 		    }
5399 
5400 		  SUBST (XVECEXP (x, i, j), new_rtx);
5401 		}
5402 	    }
5403 	  else if (fmt[i] == 'e')
5404 	    {
5405 	      /* If this is a register being set, ignore it.  */
5406 	      new_rtx = XEXP (x, i);
5407 	      if (in_dest
5408 		  && i == 0
5409 		  && (((code == SUBREG || code == ZERO_EXTRACT)
5410 		       && REG_P (new_rtx))
5411 		      || code == STRICT_LOW_PART))
5412 		;
5413 
5414 	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5415 		{
5416 		  /* In general, don't install a subreg involving two
5417 		     modes that are not tieable.  It can worsen register
5418 		     allocation, and can even make invalid reload
5419 		     insns, since the reg inside may need to be copied
5420 		     from in the outside mode, and that may be invalid
5421 		     if it is an fp reg copied in integer mode.
5422 
5423 		     We allow two exceptions to this: It is valid if
5424 		     it is inside another SUBREG and the mode of that
5425 		     SUBREG and the mode of the inside of TO is
5426 		     tieable and it is valid if X is a SET that copies
5427 		     FROM to CC0.  */
5428 
5429 		  if (GET_CODE (to) == SUBREG
5430 		      && ! MODES_TIEABLE_P (GET_MODE (to),
5431 					    GET_MODE (SUBREG_REG (to)))
5432 		      && ! (code == SUBREG
5433 			    && MODES_TIEABLE_P (GET_MODE (x),
5434 						GET_MODE (SUBREG_REG (to))))
5435 		      && (!HAVE_cc0
5436 			  || (! (code == SET
5437 				 && i == 1
5438 				 && XEXP (x, 0) == cc0_rtx))))
5439 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5440 
5441 		  if (code == SUBREG
5442 		      && REG_P (to)
5443 		      && REGNO (to) < FIRST_PSEUDO_REGISTER
5444 		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5445 						SUBREG_BYTE (x),
5446 						GET_MODE (x)) < 0)
5447 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5448 
5449 		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5450 		  n_occurrences++;
5451 		}
5452 	      else
5453 		/* If we are in a SET_DEST, suppress most cases unless we
5454 		   have gone inside a MEM, in which case we want to
5455 		   simplify the address.  We assume here that things that
5456 		   are actually part of the destination have their inner
5457 		   parts in the first expression.  This is true for SUBREG,
5458 		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5459 		   things aside from REG and MEM that should appear in a
5460 		   SET_DEST.  */
5461 		new_rtx = subst (XEXP (x, i), from, to,
5462 			     (((in_dest
5463 				&& (code == SUBREG || code == STRICT_LOW_PART
5464 				    || code == ZERO_EXTRACT))
5465 			       || code == SET)
5466 			      && i == 0),
5467 				 code == IF_THEN_ELSE && i == 0,
5468 				 unique_copy);
5469 
5470 	      /* If we found that we will have to reject this combination,
5471 		 indicate that by returning the CLOBBER ourselves, rather than
5472 		 an expression containing it.  This will speed things up as
5473 		 well as prevent accidents where two CLOBBERs are considered
5474 		 to be equal, thus producing an incorrect simplification.  */
5475 
5476 	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5477 		return new_rtx;
5478 
5479 	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5480 		{
5481 		  machine_mode mode = GET_MODE (x);
5482 
5483 		  x = simplify_subreg (GET_MODE (x), new_rtx,
5484 				       GET_MODE (SUBREG_REG (x)),
5485 				       SUBREG_BYTE (x));
5486 		  if (! x)
5487 		    x = gen_rtx_CLOBBER (mode, const0_rtx);
5488 		}
5489 	      else if (CONST_SCALAR_INT_P (new_rtx)
5490 		       && (GET_CODE (x) == ZERO_EXTEND
5491 			   || GET_CODE (x) == FLOAT
5492 			   || GET_CODE (x) == UNSIGNED_FLOAT))
5493 		{
5494 		  x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5495 						new_rtx,
5496 						GET_MODE (XEXP (x, 0)));
5497 		  if (!x)
5498 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5499 		}
5500 	      else
5501 		SUBST (XEXP (x, i), new_rtx);
5502 	    }
5503 	}
5504     }
5505 
5506   /* Check if we are loading something from the constant pool via float
5507      extension; in this case we would undo the compress_float_constant
5508      optimization and degrade the constant load to an immediate value.  */
5509   if (GET_CODE (x) == FLOAT_EXTEND
5510       && MEM_P (XEXP (x, 0))
5511       && MEM_READONLY_P (XEXP (x, 0)))
5512     {
5513       rtx tmp = avoid_constant_pool_reference (x);
5514       if (x != tmp)
5515         return x;
5516     }
5517 
5518   /* Try to simplify X.  If the simplification changed the code, it is likely
5519      that further simplification will help, so loop, but limit the number
5520      of repetitions that will be performed.  */
5521 
5522   for (i = 0; i < 4; i++)
5523     {
5524       /* If X is sufficiently simple, don't bother trying to do anything
5525 	 with it.  */
5526       if (code != CONST_INT && code != REG && code != CLOBBER)
5527 	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5528 
5529       if (GET_CODE (x) == code)
5530 	break;
5531 
5532       code = GET_CODE (x);
5533 
5534       /* We no longer know the original mode of operand 0 since we
5535 	 have changed the form of X.  */
5536       op0_mode = VOIDmode;
5537     }
5538 
5539   return x;
5540 }
5541 
5542 /* If X is a commutative operation whose operands are not in the canonical
5543    order, use substitutions to swap them.  */
5544 
5545 static void
5546 maybe_swap_commutative_operands (rtx x)
5547 {
5548   if (COMMUTATIVE_ARITH_P (x)
5549       && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5550     {
5551       rtx temp = XEXP (x, 0);
5552       SUBST (XEXP (x, 0), XEXP (x, 1));
5553       SUBST (XEXP (x, 1), temp);
5554     }
5555 }
5556 
5557 /* Simplify X, a piece of RTL.  We just operate on the expression at the
5558    outer level; call `subst' to simplify recursively.  Return the new
5559    expression.
5560 
5561    OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
5562    if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
5563    of a condition.  */
5564 
5565 static rtx
5566 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5567 		      int in_cond)
5568 {
5569   enum rtx_code code = GET_CODE (x);
5570   machine_mode mode = GET_MODE (x);
5571   rtx temp;
5572   int i;
5573 
5574   /* If this is a commutative operation, put a constant last and a complex
5575      expression first.  We don't need to do this for comparisons here.  */
5576   maybe_swap_commutative_operands (x);
5577 
5578   /* Try to fold this expression in case we have constants that weren't
5579      present before.  */
5580   temp = 0;
5581   switch (GET_RTX_CLASS (code))
5582     {
5583     case RTX_UNARY:
5584       if (op0_mode == VOIDmode)
5585 	op0_mode = GET_MODE (XEXP (x, 0));
5586       temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5587       break;
5588     case RTX_COMPARE:
5589     case RTX_COMM_COMPARE:
5590       {
5591 	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5592 	if (cmp_mode == VOIDmode)
5593 	  {
5594 	    cmp_mode = GET_MODE (XEXP (x, 1));
5595 	    if (cmp_mode == VOIDmode)
5596 	      cmp_mode = op0_mode;
5597 	  }
5598 	temp = simplify_relational_operation (code, mode, cmp_mode,
5599 					      XEXP (x, 0), XEXP (x, 1));
5600       }
5601       break;
5602     case RTX_COMM_ARITH:
5603     case RTX_BIN_ARITH:
5604       temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5605       break;
5606     case RTX_BITFIELD_OPS:
5607     case RTX_TERNARY:
5608       temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5609 					 XEXP (x, 1), XEXP (x, 2));
5610       break;
5611     default:
5612       break;
5613     }
5614 
5615   if (temp)
5616     {
5617       x = temp;
5618       code = GET_CODE (temp);
5619       op0_mode = VOIDmode;
5620       mode = GET_MODE (temp);
5621     }
5622 
5623   /* If this is a simple operation applied to an IF_THEN_ELSE, try
5624      applying it to the arms of the IF_THEN_ELSE.  This often simplifies
5625      things.  Check for cases where both arms are testing the same
5626      condition.
5627 
5628      Don't do anything if all operands are very simple.  */
5629 
5630   if ((BINARY_P (x)
5631        && ((!OBJECT_P (XEXP (x, 0))
5632 	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5633 		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5634 	   || (!OBJECT_P (XEXP (x, 1))
5635 	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5636 		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5637       || (UNARY_P (x)
5638 	  && (!OBJECT_P (XEXP (x, 0))
5639 	       && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5640 		     && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5641     {
5642       rtx cond, true_rtx, false_rtx;
5643 
5644       cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5645       if (cond != 0
5646 	  /* If everything is a comparison, what we have is highly unlikely
5647 	     to be simpler, so don't use it.  */
5648 	  && ! (COMPARISON_P (x)
5649 		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5650 	  /* Similarly, if we end up with one of the expressions the same
5651 	     as the original, it is certainly not simpler.  */
5652 	  && ! rtx_equal_p (x, true_rtx)
5653 	  && ! rtx_equal_p (x, false_rtx))
5654 	{
5655 	  rtx cop1 = const0_rtx;
5656 	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5657 
5658 	  if (cond_code == NE && COMPARISON_P (cond))
5659 	    return x;
5660 
5661 	  /* Simplify the alternative arms; this may collapse the true and
5662 	     false arms to store-flag values.  Be careful to use copy_rtx
5663 	     here since true_rtx or false_rtx might share RTL with x as a
5664 	     result of the if_then_else_cond call above.  */
5665 	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5666 	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5667 
5668 	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
5669 	     is unlikely to be simpler.  */
5670 	  if (general_operand (true_rtx, VOIDmode)
5671 	      && general_operand (false_rtx, VOIDmode))
5672 	    {
5673 	      enum rtx_code reversed;
5674 
5675 	      /* Restarting if we generate a store-flag expression will cause
5676 		 us to loop.  Just drop through in this case.  */
5677 
5678 	      /* If the result values are STORE_FLAG_VALUE and zero, we can
5679 		 just make the comparison operation.  */
5680 	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5681 		x = simplify_gen_relational (cond_code, mode, VOIDmode,
5682 					     cond, cop1);
5683 	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5684 		       && ((reversed = reversed_comparison_code_parts
5685 					(cond_code, cond, cop1, NULL))
5686 			   != UNKNOWN))
5687 		x = simplify_gen_relational (reversed, mode, VOIDmode,
5688 					     cond, cop1);
5689 
5690 	      /* Likewise, we can make the negate of a comparison operation
5691 		 if the result values are - STORE_FLAG_VALUE and zero.  */
5692 	      else if (CONST_INT_P (true_rtx)
5693 		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5694 		       && false_rtx == const0_rtx)
5695 		x = simplify_gen_unary (NEG, mode,
5696 					simplify_gen_relational (cond_code,
5697 								 mode, VOIDmode,
5698 								 cond, cop1),
5699 					mode);
5700 	      else if (CONST_INT_P (false_rtx)
5701 		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5702 		       && true_rtx == const0_rtx
5703 		       && ((reversed = reversed_comparison_code_parts
5704 					(cond_code, cond, cop1, NULL))
5705 			   != UNKNOWN))
5706 		x = simplify_gen_unary (NEG, mode,
5707 					simplify_gen_relational (reversed,
5708 								 mode, VOIDmode,
5709 								 cond, cop1),
5710 					mode);
5711 	      else
5712 		return gen_rtx_IF_THEN_ELSE (mode,
5713 					     simplify_gen_relational (cond_code,
5714 								      mode,
5715 								      VOIDmode,
5716 								      cond,
5717 								      cop1),
5718 					     true_rtx, false_rtx);
5719 
5720 	      code = GET_CODE (x);
5721 	      op0_mode = VOIDmode;
5722 	    }
5723 	}
5724     }
5725 
5726   /* First see if we can apply the inverse distributive law.  */
5727   if (code == PLUS || code == MINUS
5728       || code == AND || code == IOR || code == XOR)
5729     {
5730       x = apply_distributive_law (x);
5731       code = GET_CODE (x);
5732       op0_mode = VOIDmode;
5733     }
5734 
5735   /* If CODE is an associative operation not otherwise handled, see if we
5736      can associate some operands.  This can win if they are constants or
5737      if they are logically related (e.g. (a & b) & a).  */
5738   if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5739        || code == AND || code == IOR || code == XOR
5740        || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5741       && ((INTEGRAL_MODE_P (mode) && code != DIV)
5742 	  || (flag_associative_math && FLOAT_MODE_P (mode))))
5743     {
5744       if (GET_CODE (XEXP (x, 0)) == code)
5745 	{
5746 	  rtx other = XEXP (XEXP (x, 0), 0);
5747 	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5748 	  rtx inner_op1 = XEXP (x, 1);
5749 	  rtx inner;
5750 
5751 	  /* Make sure we pass the constant operand, if any, as the second
5752 	     one if this is a commutative operation.  */
5753 	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5754 	    std::swap (inner_op0, inner_op1);
5755 	  inner = simplify_binary_operation (code == MINUS ? PLUS
5756 					     : code == DIV ? MULT
5757 					     : code,
5758 					     mode, inner_op0, inner_op1);
5759 
5760 	  /* For commutative operations, try the other pair if that one
5761 	     didn't simplify.  */
5762 	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5763 	    {
5764 	      other = XEXP (XEXP (x, 0), 1);
5765 	      inner = simplify_binary_operation (code, mode,
5766 						 XEXP (XEXP (x, 0), 0),
5767 						 XEXP (x, 1));
5768 	    }
5769 
5770 	  if (inner)
5771 	    return simplify_gen_binary (code, mode, other, inner);
5772 	}
5773     }
5774 
5775   /* A little bit of algebraic simplification here.  */
5776   switch (code)
5777     {
5778     case MEM:
5779       /* Ensure that our address has any ASHIFTs converted to MULT in case
5780 	 address-recognizing predicates are called later.  */
5781       temp = make_compound_operation (XEXP (x, 0), MEM);
5782       SUBST (XEXP (x, 0), temp);
5783       break;
5784 
5785     case SUBREG:
5786       if (op0_mode == VOIDmode)
5787 	op0_mode = GET_MODE (SUBREG_REG (x));
5788 
5789       /* See if this can be moved to simplify_subreg.  */
5790       if (CONSTANT_P (SUBREG_REG (x))
5791 	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5792 	     /* Don't call gen_lowpart if the inner mode
5793 		is VOIDmode and we cannot simplify it, as SUBREG without
5794 		inner mode is invalid.  */
5795 	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5796 	      || gen_lowpart_common (mode, SUBREG_REG (x))))
5797 	return gen_lowpart (mode, SUBREG_REG (x));
5798 
5799       if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5800 	break;
5801       {
5802 	rtx temp;
5803 	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5804 				SUBREG_BYTE (x));
5805 	if (temp)
5806 	  return temp;
5807 
5808 	/* If op is known to have all lower bits zero, the result is zero.  */
5809 	if (!in_dest
5810 	    && SCALAR_INT_MODE_P (mode)
5811 	    && SCALAR_INT_MODE_P (op0_mode)
5812 	    && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
5813 	    && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5814 	    && HWI_COMPUTABLE_MODE_P (op0_mode)
5815 	    && ((nonzero_bits (SUBREG_REG (x), op0_mode)
5816 		 & GET_MODE_MASK (mode)) == 0)
5817 	    && !side_effects_p (SUBREG_REG (x)))
5818 	  return CONST0_RTX (mode);
5819       }
5820 
5821       /* Don't change the mode of the MEM if that would change the meaning
5822 	 of the address.  */
5823       if (MEM_P (SUBREG_REG (x))
5824 	  && (MEM_VOLATILE_P (SUBREG_REG (x))
5825 	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5826 					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
5827 	return gen_rtx_CLOBBER (mode, const0_rtx);
5828 
5829       /* Note that we cannot do any narrowing for non-constants since
5830 	 we might have been counting on using the fact that some bits were
5831 	 zero.  We now do this in the SET.  */
5832 
5833       break;
5834 
5835     case NEG:
5836       temp = expand_compound_operation (XEXP (x, 0));
5837 
5838       /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5839 	 replaced by (lshiftrt X C).  This will convert
5840 	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
5841 
5842       if (GET_CODE (temp) == ASHIFTRT
5843 	  && CONST_INT_P (XEXP (temp, 1))
5844 	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5845 	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5846 				     INTVAL (XEXP (temp, 1)));
5847 
5848       /* If X has only a single bit that might be nonzero, say, bit I, convert
5849 	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5850 	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
5851 	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
5852 	 or a SUBREG of one since we'd be making the expression more
5853 	 complex if it was just a register.  */
5854 
5855       if (!REG_P (temp)
5856 	  && ! (GET_CODE (temp) == SUBREG
5857 		&& REG_P (SUBREG_REG (temp)))
5858 	  && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5859 	{
5860 	  rtx temp1 = simplify_shift_const
5861 	    (NULL_RTX, ASHIFTRT, mode,
5862 	     simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5863 				   GET_MODE_PRECISION (mode) - 1 - i),
5864 	     GET_MODE_PRECISION (mode) - 1 - i);
5865 
5866 	  /* If all we did was surround TEMP with the two shifts, we
5867 	     haven't improved anything, so don't use it.  Otherwise,
5868 	     we are better off with TEMP1.  */
5869 	  if (GET_CODE (temp1) != ASHIFTRT
5870 	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5871 	      || XEXP (XEXP (temp1, 0), 0) != temp)
5872 	    return temp1;
5873 	}
5874       break;
5875 
5876     case TRUNCATE:
5877       /* We can't handle truncation to a partial integer mode here
5878 	 because we don't know the real bitsize of the partial
5879 	 integer mode.  */
5880       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5881 	break;
5882 
5883       if (HWI_COMPUTABLE_MODE_P (mode))
5884 	SUBST (XEXP (x, 0),
5885 	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5886 			      GET_MODE_MASK (mode), 0));
5887 
5888       /* We can truncate a constant value and return it.  */
5889       if (CONST_INT_P (XEXP (x, 0)))
5890 	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5891 
5892       /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5893 	 whose value is a comparison can be replaced with a subreg if
5894 	 STORE_FLAG_VALUE permits.  */
5895       if (HWI_COMPUTABLE_MODE_P (mode)
5896 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5897 	  && (temp = get_last_value (XEXP (x, 0)))
5898 	  && COMPARISON_P (temp))
5899 	return gen_lowpart (mode, XEXP (x, 0));
5900       break;
5901 
5902     case CONST:
5903       /* (const (const X)) can become (const X).  Do it this way rather than
5904 	 returning the inner CONST since CONST can be shared with a
5905 	 REG_EQUAL note.  */
5906       if (GET_CODE (XEXP (x, 0)) == CONST)
5907 	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5908       break;
5909 
5910     case LO_SUM:
5911       /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
5912 	 can add in an offset.  find_split_point will split this address up
5913 	 again if it doesn't match.  */
5914       if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5915 	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5916 	return XEXP (x, 1);
5917       break;
5918 
5919     case PLUS:
5920       /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5921 	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5922 	 bit-field and can be replaced by either a sign_extend or a
5923 	 sign_extract.  The `and' may be a zero_extend and the two
5924 	 <c>, -<c> constants may be reversed.  */
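      /* For instance, (plus (xor (and X (const_int 255)) (const_int 128))
	 (const_int -128)) sign-extends the low 8 bits of X and is rewritten
	 below as an ashift/ashiftrt pair.  */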
5925       if (GET_CODE (XEXP (x, 0)) == XOR
5926 	  && CONST_INT_P (XEXP (x, 1))
5927 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5928 	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5929 	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5930 	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5931 	  && HWI_COMPUTABLE_MODE_P (mode)
5932 	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5933 	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5934 	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5935 		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5936 	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5937 		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5938 		      == (unsigned int) i + 1))))
5939 	return simplify_shift_const
5940 	  (NULL_RTX, ASHIFTRT, mode,
5941 	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
5942 				 XEXP (XEXP (XEXP (x, 0), 0), 0),
5943 				 GET_MODE_PRECISION (mode) - (i + 1)),
5944 	   GET_MODE_PRECISION (mode) - (i + 1));
5945 
5946       /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5947 	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5948 	 the bitsize of the mode - 1.  This allows simplification of
5949 	 "a = (b & 8) == 0;"  */
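      /* For example, if only bit 0 of X can be nonzero, then X is 0 or 1
	 and (plus X -1) is -1 or 0 respectively; in SImode the replacement
	 (ashiftrt (ashift (xor X 1) 31) 31) moves the inverted bit into
	 the sign position and then smears it, producing the same values.  */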
5950       if (XEXP (x, 1) == constm1_rtx
5951 	  && !REG_P (XEXP (x, 0))
5952 	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5953 		&& REG_P (SUBREG_REG (XEXP (x, 0))))
5954 	  && nonzero_bits (XEXP (x, 0), mode) == 1)
5955 	return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5956 	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
5957 				 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5958 				 GET_MODE_PRECISION (mode) - 1),
5959 	   GET_MODE_PRECISION (mode) - 1);
5960 
5961       /* If we are adding two things that have no bits in common, convert
5962 	 the addition into an IOR.  This will often be further simplified,
5963 	 for example in cases like ((a & 1) + (a & 2)), which can
5964 	 become a & 3.  */
5965 
5966       if (HWI_COMPUTABLE_MODE_P (mode)
5967 	  && (nonzero_bits (XEXP (x, 0), mode)
5968 	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
5969 	{
5970 	  /* Try to simplify the expression further.  */
5971 	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5972 	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5973 
5974 	  /* If we could, great.  If not, do not go ahead with the IOR
5975 	     replacement, since PLUS appears in many special purpose
5976 	     address arithmetic instructions.  */
5977 	  if (GET_CODE (temp) != CLOBBER
5978 	      && (GET_CODE (temp) != IOR
5979 		  || ((XEXP (temp, 0) != XEXP (x, 0)
5980 		       || XEXP (temp, 1) != XEXP (x, 1))
5981 		      && (XEXP (temp, 0) != XEXP (x, 1)
5982 			  || XEXP (temp, 1) != XEXP (x, 0)))))
5983 	    return temp;
5984 	}
5985 
5986       /* Canonicalize x + x into x << 1.  */
5987       if (GET_MODE_CLASS (mode) == MODE_INT
5988 	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
5989 	  && !side_effects_p (XEXP (x, 0)))
5990 	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
5991 
5992       break;
5993 
5994     case MINUS:
5995       /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5996 	 (and <foo> (const_int pow2-1))  */
5997       if (GET_CODE (XEXP (x, 1)) == AND
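      /* For example, with pow2 == 8, (minus X (and X -8)) keeps only the
	 low three bits of X and becomes (and X 7).  */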
5998 	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5999 	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6000 	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6001 	return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
6002 				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6003       break;
6004 
6005     case MULT:
6006       /* If we have (mult (plus A B) C), apply the distributive law and then
6007 	 the inverse distributive law to see if things simplify.  This
6008 	 occurs mostly in addresses, often when unrolling loops.  */
6009 
6010       if (GET_CODE (XEXP (x, 0)) == PLUS)
6011 	{
6012 	  rtx result = distribute_and_simplify_rtx (x, 0);
6013 	  if (result)
6014 	    return result;
6015 	}
6016 
6017       /* Try to simplify a*(b/c) as (a*b)/c.  */
6018       if (FLOAT_MODE_P (mode) && flag_associative_math
6019 	  && GET_CODE (XEXP (x, 0)) == DIV)
6020 	{
6021 	  rtx tem = simplify_binary_operation (MULT, mode,
6022 					       XEXP (XEXP (x, 0), 0),
6023 					       XEXP (x, 1));
6024 	  if (tem)
6025 	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6026 	}
6027       break;
6028 
6029     case UDIV:
6030       /* If this is a divide by a power of two, treat it as a shift if
6031 	 its first operand is a shift.  */
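      /* For example, (udiv (lshiftrt X 2) 8) becomes
	 (lshiftrt (lshiftrt X 2) 3), which simplify_shift_const can then
	 fold into a single shift.  */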
6032       if (CONST_INT_P (XEXP (x, 1))
6033 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6034 	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
6035 	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6036 	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6037 	      || GET_CODE (XEXP (x, 0)) == ROTATE
6038 	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
6039 	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
6040       break;
6041 
6042     case EQ:  case NE:
6043     case GT:  case GTU:  case GE:  case GEU:
6044     case LT:  case LTU:  case LE:  case LEU:
6045     case UNEQ:  case LTGT:
6046     case UNGT:  case UNGE:
6047     case UNLT:  case UNLE:
6048     case UNORDERED: case ORDERED:
6049       /* If the first operand is a condition code, we can't do anything
6050 	 with it.  */
6051       if (GET_CODE (XEXP (x, 0)) == COMPARE
6052 	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6053 	      && ! CC0_P (XEXP (x, 0))))
6054 	{
6055 	  rtx op0 = XEXP (x, 0);
6056 	  rtx op1 = XEXP (x, 1);
6057 	  enum rtx_code new_code;
6058 
6059 	  if (GET_CODE (op0) == COMPARE)
6060 	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6061 
6062 	  /* Simplify our comparison, if possible.  */
6063 	  new_code = simplify_comparison (code, &op0, &op1);
6064 
6065 	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6066 	     if only the low-order bit is possibly nonzero in X (such as when
6067 	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
6068 	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
6069 	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
6070 	     (plus X 1).
6071 
6072 	     Remove any ZERO_EXTRACT we made when thinking this was a
6073 	     comparison.  It may now be simpler to use, e.g., an AND.  If a
6074 	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
6075 	     the call to make_compound_operation in the SET case.
6076 
6077 	     Don't apply these optimizations if the caller would
6078 	     prefer a comparison rather than a value.
6079 	     E.g., for the condition in an IF_THEN_ELSE most targets need
6080 	     an explicit comparison.  */
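	  /* For example, with STORE_FLAG_VALUE == 1, if X is known to be
	     0 or 1, (ne X 0) is simply X and (eq X 0) is (xor X 1); if X
	     is known to be 0 or -1, (ne X 0) becomes (neg X) and (eq X 0)
	     becomes (plus X 1), matching the cases below.  */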
6081 
6082 	  if (in_cond)
6083 	    ;
6084 
6085 	  else if (STORE_FLAG_VALUE == 1
6086 	      && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6087 	      && op1 == const0_rtx
6088 	      && mode == GET_MODE (op0)
6089 	      && nonzero_bits (op0, mode) == 1)
6090 	    return gen_lowpart (mode,
6091 				expand_compound_operation (op0));
6092 
6093 	  else if (STORE_FLAG_VALUE == 1
6094 		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6095 		   && op1 == const0_rtx
6096 		   && mode == GET_MODE (op0)
6097 		   && (num_sign_bit_copies (op0, mode)
6098 		       == GET_MODE_PRECISION (mode)))
6099 	    {
6100 	      op0 = expand_compound_operation (op0);
6101 	      return simplify_gen_unary (NEG, mode,
6102 					 gen_lowpart (mode, op0),
6103 					 mode);
6104 	    }
6105 
6106 	  else if (STORE_FLAG_VALUE == 1
6107 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6108 		   && op1 == const0_rtx
6109 		   && mode == GET_MODE (op0)
6110 		   && nonzero_bits (op0, mode) == 1)
6111 	    {
6112 	      op0 = expand_compound_operation (op0);
6113 	      return simplify_gen_binary (XOR, mode,
6114 					  gen_lowpart (mode, op0),
6115 					  const1_rtx);
6116 	    }
6117 
6118 	  else if (STORE_FLAG_VALUE == 1
6119 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6120 		   && op1 == const0_rtx
6121 		   && mode == GET_MODE (op0)
6122 		   && (num_sign_bit_copies (op0, mode)
6123 		       == GET_MODE_PRECISION (mode)))
6124 	    {
6125 	      op0 = expand_compound_operation (op0);
6126 	      return plus_constant (mode, gen_lowpart (mode, op0), 1);
6127 	    }
6128 
6129 	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
6130 	     those above.  */
6131 	  if (in_cond)
6132 	    ;
6133 
6134 	  else if (STORE_FLAG_VALUE == -1
6135 		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6136 		   && op1 == const0_rtx
6137 		   && mode == GET_MODE (op0)
6138 		   && (num_sign_bit_copies (op0, mode)
6139 		       == GET_MODE_PRECISION (mode)))
6140 	    return gen_lowpart (mode,
6141 				expand_compound_operation (op0));
6142 
6143 	  else if (STORE_FLAG_VALUE == -1
6144 		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6145 		   && op1 == const0_rtx
6146 		   && mode == GET_MODE (op0)
6147 		   && nonzero_bits (op0, mode) == 1)
6148 	    {
6149 	      op0 = expand_compound_operation (op0);
6150 	      return simplify_gen_unary (NEG, mode,
6151 					 gen_lowpart (mode, op0),
6152 					 mode);
6153 	    }
6154 
6155 	  else if (STORE_FLAG_VALUE == -1
6156 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6157 		   && op1 == const0_rtx
6158 		   && mode == GET_MODE (op0)
6159 		   && (num_sign_bit_copies (op0, mode)
6160 		       == GET_MODE_PRECISION (mode)))
6161 	    {
6162 	      op0 = expand_compound_operation (op0);
6163 	      return simplify_gen_unary (NOT, mode,
6164 					 gen_lowpart (mode, op0),
6165 					 mode);
6166 	    }
6167 
6168 	  /* If X is 0/1, (eq X 0) is X-1.  */
6169 	  else if (STORE_FLAG_VALUE == -1
6170 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6171 		   && op1 == const0_rtx
6172 		   && mode == GET_MODE (op0)
6173 		   && nonzero_bits (op0, mode) == 1)
6174 	    {
6175 	      op0 = expand_compound_operation (op0);
6176 	      return plus_constant (mode, gen_lowpart (mode, op0), -1);
6177 	    }
6178 
6179 	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6180 	     one bit that might be nonzero, we can convert (ne x 0) to
6181 	     (ashift x c) where C puts the bit in the sign bit.  Remove any
6182 	     AND with STORE_FLAG_VALUE when we are done, since we are only
6183 	     going to test the sign bit.  */
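	  /* For example, in SImode, if STORE_FLAG_VALUE has just the sign
	     bit set and nonzero_bits (X) == 8 (only bit 3 can be set),
	     (ne X 0) becomes (ashift X 28), which moves bit 3 into the
	     sign bit.  */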
6184 	  if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6185 	      && HWI_COMPUTABLE_MODE_P (mode)
6186 	      && val_signbit_p (mode, STORE_FLAG_VALUE)
6187 	      && op1 == const0_rtx
6188 	      && mode == GET_MODE (op0)
6189 	      && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
6190 	    {
6191 	      x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6192 					expand_compound_operation (op0),
6193 					GET_MODE_PRECISION (mode) - 1 - i);
6194 	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6195 		return XEXP (x, 0);
6196 	      else
6197 		return x;
6198 	    }
6199 
6200 	  /* If the code changed, return a whole new comparison.
6201 	     We also need to avoid using SUBST in cases where
6202 	     simplify_comparison has widened a comparison with a CONST_INT,
6203 	     since in that case the wider CONST_INT may fail the sanity
6204 	     checks in do_SUBST.  */
6205 	  if (new_code != code
6206 	      || (CONST_INT_P (op1)
6207 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6208 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6209 	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6210 
6211 	  /* Otherwise, keep this operation, but maybe change its operands.
6212 	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
6213 	  SUBST (XEXP (x, 0), op0);
6214 	  SUBST (XEXP (x, 1), op1);
6215 	}
6216       break;
6217 
6218     case IF_THEN_ELSE:
6219       return simplify_if_then_else (x);
6220 
6221     case ZERO_EXTRACT:
6222     case SIGN_EXTRACT:
6223     case ZERO_EXTEND:
6224     case SIGN_EXTEND:
6225       /* If we are processing SET_DEST, we are done.  */
6226       if (in_dest)
6227 	return x;
6228 
6229       return expand_compound_operation (x);
6230 
6231     case SET:
6232       return simplify_set (x);
6233 
6234     case AND:
6235     case IOR:
6236       return simplify_logical (x);
6237 
6238     case ASHIFT:
6239     case LSHIFTRT:
6240     case ASHIFTRT:
6241     case ROTATE:
6242     case ROTATERT:
6243       /* If this is a shift by a constant amount, simplify it.  */
6244       if (CONST_INT_P (XEXP (x, 1)))
6245 	return simplify_shift_const (x, code, mode, XEXP (x, 0),
6246 				     INTVAL (XEXP (x, 1)));
6247 
6248       else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6249 	SUBST (XEXP (x, 1),
6250 	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6251 			      (HOST_WIDE_INT_1U
6252 			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6253 			      - 1,
6254 			      0));
6255       break;
6256 
6257     default:
6258       break;
6259     }
6260 
6261   return x;
6262 }
6263 
6264 /* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */
6265 
6266 static rtx
6267 simplify_if_then_else (rtx x)
6268 {
6269   machine_mode mode = GET_MODE (x);
6270   rtx cond = XEXP (x, 0);
6271   rtx true_rtx = XEXP (x, 1);
6272   rtx false_rtx = XEXP (x, 2);
6273   enum rtx_code true_code = GET_CODE (cond);
6274   int comparison_p = COMPARISON_P (cond);
6275   rtx temp;
6276   int i;
6277   enum rtx_code false_code;
6278   rtx reversed;
6279 
6280   /* Simplify storing of the truth value.  */
6281   if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6282     return simplify_gen_relational (true_code, mode, VOIDmode,
6283 				    XEXP (cond, 0), XEXP (cond, 1));
6284 
6285   /* Also when the truth value has to be reversed.  */
6286   if (comparison_p
6287       && true_rtx == const0_rtx && false_rtx == const_true_rtx
6288       && (reversed = reversed_comparison (cond, mode)))
6289     return reversed;
6290 
6291   /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6292      in it is being compared against certain values.  Get the true and false
6293      comparisons and see if that says anything about the value of each arm.  */
6294 
6295   if (comparison_p
6296       && ((false_code = reversed_comparison_code (cond, NULL))
6297 	  != UNKNOWN)
6298       && REG_P (XEXP (cond, 0)))
6299     {
6300       HOST_WIDE_INT nzb;
6301       rtx from = XEXP (cond, 0);
6302       rtx true_val = XEXP (cond, 1);
6303       rtx false_val = true_val;
6304       int swapped = 0;
6305 
6306       /* If FALSE_CODE is EQ, swap the codes and arms.  */
6307 
6308       if (false_code == EQ)
6309 	{
6310 	  swapped = 1, true_code = EQ, false_code = NE;
6311 	  std::swap (true_rtx, false_rtx);
6312 	}
6313 
6314       /* If we are comparing against zero and the expression being tested has
6315 	 only a single bit that might be nonzero, that is its value when it is
6316 	 not equal to zero.  Similarly if it is known to be -1 or 0.  */
6317 
6318       if (true_code == EQ && true_val == const0_rtx
6319 	  && pow2p_hwi (nzb = nonzero_bits (from, GET_MODE (from))))
6320 	{
6321 	  false_code = EQ;
6322 	  false_val = gen_int_mode (nzb, GET_MODE (from));
6323 	}
6324       else if (true_code == EQ && true_val == const0_rtx
6325 	       && (num_sign_bit_copies (from, GET_MODE (from))
6326 		   == GET_MODE_PRECISION (GET_MODE (from))))
6327 	{
6328 	  false_code = EQ;
6329 	  false_val = constm1_rtx;
6330 	}
6331 
6332       /* Now simplify an arm if we know the value of the register in the
6333 	 branch and it is used in the arm.  Be careful due to the potential
6334 	 of locally-shared RTL.  */
6335 
6336       if (reg_mentioned_p (from, true_rtx))
6337 	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6338 				      from, true_val),
6339 			  pc_rtx, pc_rtx, 0, 0, 0);
6340       if (reg_mentioned_p (from, false_rtx))
6341 	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6342 				       from, false_val),
6343 			   pc_rtx, pc_rtx, 0, 0, 0);
6344 
6345       SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6346       SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6347 
6348       true_rtx = XEXP (x, 1);
6349       false_rtx = XEXP (x, 2);
6350       true_code = GET_CODE (cond);
6351     }
6352 
6353   /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6354      reversed, do so to avoid needing two sets of patterns for
6355      subtract-and-branch insns.  Do the same if we have a constant in the
6356      true arm, if the false arm is the same as the first operand of the
6357      comparison, or if the false arm is more complicated than the true arm.  */
6358 
6359   if (comparison_p
6360       && reversed_comparison_code (cond, NULL) != UNKNOWN
6361       && (true_rtx == pc_rtx
6362 	  || (CONSTANT_P (true_rtx)
6363 	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6364 	  || true_rtx == const0_rtx
6365 	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6366 	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6367 	      && !OBJECT_P (false_rtx))
6368 	  || reg_mentioned_p (true_rtx, false_rtx)
6369 	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6370     {
6371       true_code = reversed_comparison_code (cond, NULL);
6372       SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6373       SUBST (XEXP (x, 1), false_rtx);
6374       SUBST (XEXP (x, 2), true_rtx);
6375 
6376       std::swap (true_rtx, false_rtx);
6377       cond = XEXP (x, 0);
6378 
6379       /* It is possible that the conditional has been simplified out.  */
6380       true_code = GET_CODE (cond);
6381       comparison_p = COMPARISON_P (cond);
6382     }
6383 
6384   /* If the two arms are identical, we don't need the comparison.  */
6385 
6386   if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6387     return true_rtx;
6388 
6389   /* Convert a == b ? b : a to "a".  */
6390   if (true_code == EQ && ! side_effects_p (cond)
6391       && !HONOR_NANS (mode)
6392       && rtx_equal_p (XEXP (cond, 0), false_rtx)
6393       && rtx_equal_p (XEXP (cond, 1), true_rtx))
6394     return false_rtx;
6395   else if (true_code == NE && ! side_effects_p (cond)
6396 	   && !HONOR_NANS (mode)
6397 	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
6398 	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
6399     return true_rtx;
6400 
6401   /* Look for cases where we have (abs x) or (neg (abs X)).  */
6402 
6403   if (GET_MODE_CLASS (mode) == MODE_INT
6404       && comparison_p
6405       && XEXP (cond, 1) == const0_rtx
6406       && GET_CODE (false_rtx) == NEG
6407       && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6408       && rtx_equal_p (true_rtx, XEXP (cond, 0))
6409       && ! side_effects_p (true_rtx))
6410     switch (true_code)
6411       {
6412       case GT:
6413       case GE:
6414 	return simplify_gen_unary (ABS, mode, true_rtx, mode);
6415       case LT:
6416       case LE:
6417 	return
6418 	  simplify_gen_unary (NEG, mode,
6419 			      simplify_gen_unary (ABS, mode, true_rtx, mode),
6420 			      mode);
6421       default:
6422 	break;
6423       }
6424 
6425   /* Look for MIN or MAX.  */
6426 
6427   if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6428       && comparison_p
6429       && rtx_equal_p (XEXP (cond, 0), true_rtx)
6430       && rtx_equal_p (XEXP (cond, 1), false_rtx)
6431       && ! side_effects_p (cond))
6432     switch (true_code)
6433       {
6434       case GE:
6435       case GT:
6436 	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6437       case LE:
6438       case LT:
6439 	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6440       case GEU:
6441       case GTU:
6442 	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6443       case LEU:
6444       case LTU:
6445 	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6446       default:
6447 	break;
6448       }
6449 
6450   /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6451      second operand is zero, this can be done as (OP Z (mult COND C2)) where
6452      C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6453      SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6454      We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6455      neither 1 nor -1, but it isn't worth checking for.  */
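  /* For example, with STORE_FLAG_VALUE == 1,
     (if_then_else (ne A B) (plus Z 4) Z) can be rewritten as
     (plus Z (mult (ne A B) 4)), since the comparison contributes 0 or 1.  */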
6456 
6457   if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6458       && comparison_p
6459       && GET_MODE_CLASS (mode) == MODE_INT
6460       && ! side_effects_p (x))
6461     {
6462       rtx t = make_compound_operation (true_rtx, SET);
6463       rtx f = make_compound_operation (false_rtx, SET);
6464       rtx cond_op0 = XEXP (cond, 0);
6465       rtx cond_op1 = XEXP (cond, 1);
6466       enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6467       machine_mode m = mode;
6468       rtx z = 0, c1 = NULL_RTX;
6469 
6470       if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6471 	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6472 	   || GET_CODE (t) == ASHIFT
6473 	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6474 	  && rtx_equal_p (XEXP (t, 0), f))
6475 	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6476 
6477       /* If an identity-zero op is commutative, check whether there
6478 	 would be a match if we swapped the operands.  */
6479       else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6480 		|| GET_CODE (t) == XOR)
6481 	       && rtx_equal_p (XEXP (t, 1), f))
6482 	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6483       else if (GET_CODE (t) == SIGN_EXTEND
6484 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6485 		   || GET_CODE (XEXP (t, 0)) == MINUS
6486 		   || GET_CODE (XEXP (t, 0)) == IOR
6487 		   || GET_CODE (XEXP (t, 0)) == XOR
6488 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6489 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6490 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6491 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6492 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6493 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6494 	       && (num_sign_bit_copies (f, GET_MODE (f))
6495 		   > (unsigned int)
6496 		     (GET_MODE_PRECISION (mode)
6497 		      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6498 	{
6499 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6500 	  extend_op = SIGN_EXTEND;
6501 	  m = GET_MODE (XEXP (t, 0));
6502 	}
6503       else if (GET_CODE (t) == SIGN_EXTEND
6504 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6505 		   || GET_CODE (XEXP (t, 0)) == IOR
6506 		   || GET_CODE (XEXP (t, 0)) == XOR)
6507 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6508 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6509 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6510 	       && (num_sign_bit_copies (f, GET_MODE (f))
6511 		   > (unsigned int)
6512 		     (GET_MODE_PRECISION (mode)
6513 		      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6514 	{
6515 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6516 	  extend_op = SIGN_EXTEND;
6517 	  m = GET_MODE (XEXP (t, 0));
6518 	}
6519       else if (GET_CODE (t) == ZERO_EXTEND
6520 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6521 		   || GET_CODE (XEXP (t, 0)) == MINUS
6522 		   || GET_CODE (XEXP (t, 0)) == IOR
6523 		   || GET_CODE (XEXP (t, 0)) == XOR
6524 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6525 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6526 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6527 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6528 	       && HWI_COMPUTABLE_MODE_P (mode)
6529 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6530 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6531 	       && ((nonzero_bits (f, GET_MODE (f))
6532 		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6533 		   == 0))
6534 	{
6535 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6536 	  extend_op = ZERO_EXTEND;
6537 	  m = GET_MODE (XEXP (t, 0));
6538 	}
6539       else if (GET_CODE (t) == ZERO_EXTEND
6540 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6541 		   || GET_CODE (XEXP (t, 0)) == IOR
6542 		   || GET_CODE (XEXP (t, 0)) == XOR)
6543 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6544 	       && HWI_COMPUTABLE_MODE_P (mode)
6545 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6546 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6547 	       && ((nonzero_bits (f, GET_MODE (f))
6548 		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6549 		   == 0))
6550 	{
6551 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6552 	  extend_op = ZERO_EXTEND;
6553 	  m = GET_MODE (XEXP (t, 0));
6554 	}
6555 
6556       if (z)
6557 	{
6558 	  machine_mode cm = m;
6559 	  if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6560 	      && GET_MODE (c1) != VOIDmode)
6561 	    cm = GET_MODE (c1);
6562 	  temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6563 						 cond_op0, cond_op1),
6564 			pc_rtx, pc_rtx, 0, 0, 0);
6565 	  temp = simplify_gen_binary (MULT, cm, temp,
6566 				      simplify_gen_binary (MULT, cm, c1,
6567 							   const_true_rtx));
6568 	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6569 	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6570 
6571 	  if (extend_op != UNKNOWN)
6572 	    temp = simplify_gen_unary (extend_op, mode, temp, m);
6573 
6574 	  return temp;
6575 	}
6576     }
6577 
6578   /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6579      1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6580      negation of a single bit, we can convert this operation to a shift.  We
6581      can actually do this more generally, but it doesn't seem worth it.  */
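  /* For example, if A is known to be 0 or 1 and C1 is 8, the whole
     expression is just (ashift A 3); if A is known to be 0 or -1 and C1
     is -8, it is likewise (ashift A 3).  */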
6582 
6583   if (true_code == NE && XEXP (cond, 1) == const0_rtx
6584       && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6585       && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6586 	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6587 	  || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6588 	       == GET_MODE_PRECISION (mode))
6589 	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6590     return
6591       simplify_shift_const (NULL_RTX, ASHIFT, mode,
6592 			    gen_lowpart (mode, XEXP (cond, 0)), i);
6593 
6594   /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6595      non-zero bit in A is C1.  */
6596   if (true_code == NE && XEXP (cond, 1) == const0_rtx
6597       && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6598       && INTEGRAL_MODE_P (GET_MODE (XEXP (cond, 0)))
6599       && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6600 	  == nonzero_bits (XEXP (cond, 0), GET_MODE (XEXP (cond, 0)))
6601       && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6602     {
6603       rtx val = XEXP (cond, 0);
6604       enum machine_mode val_mode = GET_MODE (val);
6605       if (val_mode == mode)
6606         return val;
6607       else if (GET_MODE_PRECISION (val_mode) < GET_MODE_PRECISION (mode))
6608         return simplify_gen_unary (ZERO_EXTEND, mode, val, val_mode);
6609     }
6610 
6611   return x;
6612 }
6613 
6614 /* Simplify X, a SET expression.  Return the new expression.  */
6615 
6616 static rtx
6617 simplify_set (rtx x)
6618 {
6619   rtx src = SET_SRC (x);
6620   rtx dest = SET_DEST (x);
6621   machine_mode mode
6622     = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6623   rtx_insn *other_insn;
6624   rtx *cc_use;
6625 
6626   /* (set (pc) (return)) gets written as (return).  */
6627   if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6628     return src;
6629 
6630   /* Now that we know for sure which bits of SRC we are using, see if we can
6631      simplify the expression for the object knowing that we only need the
6632      low-order bits.  */
6633 
6634   if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6635     {
6636       src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6637       SUBST (SET_SRC (x), src);
6638     }
6639 
6640   /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6641      the comparison result and try to simplify it unless we already have used
6642      undobuf.other_insn.  */
6643   if ((GET_MODE_CLASS (mode) == MODE_CC
6644        || GET_CODE (src) == COMPARE
6645        || CC0_P (dest))
6646       && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6647       && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6648       && COMPARISON_P (*cc_use)
6649       && rtx_equal_p (XEXP (*cc_use, 0), dest))
6650     {
6651       enum rtx_code old_code = GET_CODE (*cc_use);
6652       enum rtx_code new_code;
6653       rtx op0, op1, tmp;
6654       int other_changed = 0;
6655       rtx inner_compare = NULL_RTX;
6656       machine_mode compare_mode = GET_MODE (dest);
6657 
6658       if (GET_CODE (src) == COMPARE)
6659 	{
6660 	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6661 	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6662 	    {
6663 	      inner_compare = op0;
6664 	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6665 	    }
6666 	}
6667       else
6668 	op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6669 
6670       tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6671 					   op0, op1);
6672       if (!tmp)
6673 	new_code = old_code;
6674       else if (!CONSTANT_P (tmp))
6675 	{
6676 	  new_code = GET_CODE (tmp);
6677 	  op0 = XEXP (tmp, 0);
6678 	  op1 = XEXP (tmp, 1);
6679 	}
6680       else
6681 	{
6682 	  rtx pat = PATTERN (other_insn);
6683 	  undobuf.other_insn = other_insn;
6684 	  SUBST (*cc_use, tmp);
6685 
6686 	  /* Attempt to simplify CC user.  */
6687 	  if (GET_CODE (pat) == SET)
6688 	    {
6689 	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
6690 	      if (new_rtx != NULL_RTX)
6691 		SUBST (SET_SRC (pat), new_rtx);
6692 	    }
6693 
6694 	  /* Convert X into a no-op move.  */
6695 	  SUBST (SET_DEST (x), pc_rtx);
6696 	  SUBST (SET_SRC (x), pc_rtx);
6697 	  return x;
6698 	}
6699 
6700       /* Simplify our comparison, if possible.  */
6701       new_code = simplify_comparison (new_code, &op0, &op1);
6702 
6703 #ifdef SELECT_CC_MODE
6704       /* If this machine has CC modes other than CCmode, check to see if we
6705 	 need to use a different CC mode here.  */
6706       if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6707 	compare_mode = GET_MODE (op0);
6708       else if (inner_compare
6709 	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6710 	       && new_code == old_code
6711 	       && op0 == XEXP (inner_compare, 0)
6712 	       && op1 == XEXP (inner_compare, 1))
6713 	compare_mode = GET_MODE (inner_compare);
6714       else
6715 	compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6716 
6717       /* If the mode changed, we have to change SET_DEST, the mode in the
6718 	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
6719 	 a hard register, just build new versions with the proper mode.  If it
6720 	 is a pseudo, we lose unless it is the only time we set the pseudo, in
6721 	 which case we can safely change its mode.  */
6722       if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6723 	{
6724 	  if (can_change_dest_mode (dest, 0, compare_mode))
6725 	    {
6726 	      unsigned int regno = REGNO (dest);
6727 	      rtx new_dest;
6728 
6729 	      if (regno < FIRST_PSEUDO_REGISTER)
6730 		new_dest = gen_rtx_REG (compare_mode, regno);
6731 	      else
6732 		{
6733 		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6734 		  new_dest = regno_reg_rtx[regno];
6735 		}
6736 
6737 	      SUBST (SET_DEST (x), new_dest);
6738 	      SUBST (XEXP (*cc_use, 0), new_dest);
6739 	      other_changed = 1;
6740 
6741 	      dest = new_dest;
6742 	    }
6743 	}
6744 #endif  /* SELECT_CC_MODE */
6745 
6746       /* If the code changed, we have to build a new comparison in
6747 	 undobuf.other_insn.  */
6748       if (new_code != old_code)
6749 	{
6750 	  int other_changed_previously = other_changed;
6751 	  unsigned HOST_WIDE_INT mask;
6752 	  rtx old_cc_use = *cc_use;
6753 
6754 	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6755 					  dest, const0_rtx));
6756 	  other_changed = 1;
6757 
6758 	  /* If the only change we made was to change an EQ into an NE or
6759 	     vice versa, OP0 has only one bit that might be nonzero, and OP1
6760 	     is zero, check if changing the user of the condition code will
6761 	     produce a valid insn.  If it won't, we can keep the original code
6762 	     in that insn by surrounding our operation with an XOR.  */
6763 
6764 	  if (((old_code == NE && new_code == EQ)
6765 	       || (old_code == EQ && new_code == NE))
6766 	      && ! other_changed_previously && op1 == const0_rtx
6767 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6768 	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6769 	    {
6770 	      rtx pat = PATTERN (other_insn), note = 0;
6771 
6772 	      if ((recog_for_combine (&pat, other_insn, &note) < 0
6773 		   && ! check_asm_operands (pat)))
6774 		{
6775 		  *cc_use = old_cc_use;
6776 		  other_changed = 0;
6777 
6778 		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6779 					     gen_int_mode (mask,
6780 							   GET_MODE (op0)));
6781 		}
6782 	    }
6783 	}
6784 
6785       if (other_changed)
6786 	undobuf.other_insn = other_insn;
6787 
6788       /* Don't generate a compare of a CC with 0, just use that CC.  */
6789       if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6790 	{
6791 	  SUBST (SET_SRC (x), op0);
6792 	  src = SET_SRC (x);
6793 	}
6794       /* Otherwise, if we didn't previously have the same COMPARE we
6795 	 want, create it from scratch.  */
6796       else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6797 	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6798 	{
6799 	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6800 	  src = SET_SRC (x);
6801 	}
6802     }
6803   else
6804     {
6805       /* Get SET_SRC in a form where we have placed back any
6806 	 compound expressions.  Then do the checks below.  */
6807       src = make_compound_operation (src, SET);
6808       SUBST (SET_SRC (x), src);
6809     }
6810 
6811   /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6812      and X being a REG or (subreg (reg)), we may be able to convert this to
6813      (set (subreg:m2 x) (op)).
6814 
6815      We can always do this if M1 is narrower than M2 because that means that
6816      we only care about the low bits of the result.
6817 
6818      However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6819      perform a narrower operation than requested since the high-order bits will
6820 	 be undefined.  On machines where it is defined, this transformation is safe
6821      as long as M1 and M2 have the same number of words.  */
6822 
6823   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6824       && !OBJECT_P (SUBREG_REG (src))
6825       && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6826 	   / UNITS_PER_WORD)
6827 	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6828 	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6829       && (WORD_REGISTER_OPERATIONS
6830 	  || (GET_MODE_SIZE (GET_MODE (src))
6831 	      <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6832 #ifdef CANNOT_CHANGE_MODE_CLASS
6833       && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6834 	    && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6835 					 GET_MODE (SUBREG_REG (src)),
6836 					 GET_MODE (src)))
6837 #endif
6838       && (REG_P (dest)
6839 	  || (GET_CODE (dest) == SUBREG
6840 	      && REG_P (SUBREG_REG (dest)))))
6841     {
6842       SUBST (SET_DEST (x),
6843 	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
6844 				      dest));
6845       SUBST (SET_SRC (x), SUBREG_REG (src));
6846 
6847       src = SET_SRC (x), dest = SET_DEST (x);
6848     }
6849 
6850   /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6851      in SRC.  */
6852   if (dest == cc0_rtx
6853       && GET_CODE (src) == SUBREG
6854       && subreg_lowpart_p (src)
6855       && (GET_MODE_PRECISION (GET_MODE (src))
6856 	  < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6857     {
6858       rtx inner = SUBREG_REG (src);
6859       machine_mode inner_mode = GET_MODE (inner);
6860 
6861       /* Here we make sure that we don't have a sign bit on.  */
6862       if (val_signbit_known_clear_p (GET_MODE (src),
6863 				     nonzero_bits (inner, inner_mode)))
6864 	{
6865 	  SUBST (SET_SRC (x), inner);
6866 	  src = SET_SRC (x);
6867 	}
6868     }
6869 
6870   /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6871      would require a paradoxical subreg.  Replace the subreg with a
6872      zero_extend to avoid the reload that would otherwise be required.  */
6873 
6874   enum rtx_code extend_op;
6875   if (paradoxical_subreg_p (src)
6876       && MEM_P (SUBREG_REG (src))
6877       && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6878     {
6879       SUBST (SET_SRC (x),
6880 	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6881 
6882       src = SET_SRC (x);
6883     }
6884 
6885   /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6886      are comparing an item known to be 0 or -1 against 0, use a logical
6887      operation instead. Check for one of the arms being an IOR of the other
6888      arm with some value.  We compute three terms to be IOR'ed together.  In
6889      practice, at most two will be nonzero.  Then we do the IOR's.  */
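  /* For example, if A is known to be 0 or -1,
     (if_then_else (ne A 0) (ior B C) B) becomes (ior B (and A C)):
     term1 is B, term2 is (and A C), and term3, (and (not A) 0),
     drops out.  */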
6890 
6891   if (GET_CODE (dest) != PC
6892       && GET_CODE (src) == IF_THEN_ELSE
6893       && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6894       && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6895       && XEXP (XEXP (src, 0), 1) == const0_rtx
6896       && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6897       && (!HAVE_conditional_move
6898 	  || ! can_conditionally_move_p (GET_MODE (src)))
6899       && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6900 			       GET_MODE (XEXP (XEXP (src, 0), 0)))
6901 	  == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6902       && ! side_effects_p (src))
6903     {
6904       rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6905 		      ? XEXP (src, 1) : XEXP (src, 2));
6906       rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6907 		   ? XEXP (src, 2) : XEXP (src, 1));
6908       rtx term1 = const0_rtx, term2, term3;
6909 
6910       if (GET_CODE (true_rtx) == IOR
6911 	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6912 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6913       else if (GET_CODE (true_rtx) == IOR
6914 	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6915 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6916       else if (GET_CODE (false_rtx) == IOR
6917 	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6918 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6919       else if (GET_CODE (false_rtx) == IOR
6920 	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6921 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6922 
6923       term2 = simplify_gen_binary (AND, GET_MODE (src),
6924 				   XEXP (XEXP (src, 0), 0), true_rtx);
6925       term3 = simplify_gen_binary (AND, GET_MODE (src),
6926 				   simplify_gen_unary (NOT, GET_MODE (src),
6927 						       XEXP (XEXP (src, 0), 0),
6928 						       GET_MODE (src)),
6929 				   false_rtx);
6930 
6931       SUBST (SET_SRC (x),
6932 	     simplify_gen_binary (IOR, GET_MODE (src),
6933 				  simplify_gen_binary (IOR, GET_MODE (src),
6934 						       term1, term2),
6935 				  term3));
6936 
6937       src = SET_SRC (x);
6938     }
6939 
6940   /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6941      whole thing fail.  */
6942   if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6943     return src;
6944   else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6945     return dest;
6946   else
6947     /* Convert this into a field assignment operation, if possible.  */
6948     return make_field_assignment (x);
6949 }
6950 
6951 /* Simplify X, an AND, IOR or XOR operation, and return the simplified
6952    result.  */
6953 
6954 static rtx
6955 simplify_logical (rtx x)
6956 {
6957   machine_mode mode = GET_MODE (x);
6958   rtx op0 = XEXP (x, 0);
6959   rtx op1 = XEXP (x, 1);
6960 
6961   switch (GET_CODE (x))
6962     {
6963     case AND:
6964       /* We can call simplify_and_const_int only if we don't lose
6965 	 any (sign) bits when converting INTVAL (op1) to
6966 	 "unsigned HOST_WIDE_INT".  */
6967       if (CONST_INT_P (op1)
6968 	  && (HWI_COMPUTABLE_MODE_P (mode)
6969 	      || INTVAL (op1) > 0))
6970 	{
6971 	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6972 	  if (GET_CODE (x) != AND)
6973 	    return x;
6974 
6975 	  op0 = XEXP (x, 0);
6976 	  op1 = XEXP (x, 1);
6977 	}
6978 
6979       /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6980 	 apply the distributive law and then the inverse distributive
6981 	 law to see if things simplify.  */
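      /* The distributive law turns (and (ior A B) C) into
	 (ior (and A C) (and B C)); when, for instance, B and C have no
	 bits in common, the second term drops out and the whole thing
	 simplifies to (and A C).  */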
6982       if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6983 	{
6984 	  rtx result = distribute_and_simplify_rtx (x, 0);
6985 	  if (result)
6986 	    return result;
6987 	}
6988       if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6989 	{
6990 	  rtx result = distribute_and_simplify_rtx (x, 1);
6991 	  if (result)
6992 	    return result;
6993 	}
6994       break;
6995 
6996     case IOR:
6997       /* If we have (ior (and A B) C), apply the distributive law and then
6998 	 the inverse distributive law to see if things simplify.  */
6999 
7000       if (GET_CODE (op0) == AND)
7001 	{
7002 	  rtx result = distribute_and_simplify_rtx (x, 0);
7003 	  if (result)
7004 	    return result;
7005 	}
7006 
7007       if (GET_CODE (op1) == AND)
7008 	{
7009 	  rtx result = distribute_and_simplify_rtx (x, 1);
7010 	  if (result)
7011 	    return result;
7012 	}
7013       break;
7014 
7015     default:
7016       gcc_unreachable ();
7017     }
7018 
7019   return x;
7020 }
7021 
7022 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7023    operations" because they can be replaced with two more basic operations.
7024    ZERO_EXTEND is also considered "compound" because it can be replaced with
7025    an AND operation, which is simpler, though only one operation.
7026 
7027    The function expand_compound_operation is called with an rtx expression
7028    and will convert it to the appropriate shifts and AND operations,
7029    simplifying at each stage.
7030 
7031    The function make_compound_operation is called to convert an expression
7032    consisting of shifts and ANDs into the equivalent compound expression.
7033    It is the inverse of this function, loosely speaking.  */
7034 
7035 static rtx
7036 expand_compound_operation (rtx x)
7037 {
7038   unsigned HOST_WIDE_INT pos = 0, len;
7039   int unsignedp = 0;
7040   unsigned int modewidth;
7041   rtx tem;
7042 
7043   switch (GET_CODE (x))
7044     {
7045     case ZERO_EXTEND:
7046       unsignedp = 1;
7047       /* FALLTHRU */
7048     case SIGN_EXTEND:
7049       /* We can't necessarily use a const_int for a multiword mode;
7050 	 it depends on implicitly extending the value.
7051 	 Since we don't know the right way to extend it,
7052 	 we can't tell whether the implicit way is right.
7053 
7054 	 Even for a mode that is no wider than a const_int,
7055 	 we can't win, because we need to sign extend one of its bits through
7056 	 the rest of it, and we don't know which bit.  */
7057       if (CONST_INT_P (XEXP (x, 0)))
7058 	return x;
7059 
7060       /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7061 	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
7062 	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7063 	 reloaded. If not for that, MEM's would very rarely be safe.
7064 
7065 	 Reject MODEs bigger than a word, because we might not be able
7066 	 to reference a two-register group starting with an arbitrary register
7067 	 (and currently gen_lowpart might crash for a SUBREG).  */
7068 
7069       if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
7070 	return x;
7071 
7072       /* Reject MODEs that aren't scalar integers because turning vector
7073 	 or complex modes into shifts causes problems.  */
7074 
7075       if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
7076 	return x;
7077 
7078       len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
7079       /* If the inner object has VOIDmode (the only way this can happen
7080 	 is if it is an ASM_OPERANDS), we can't do anything since we don't
7081 	 know how much masking to do.  */
7082       if (len == 0)
7083 	return x;
7084 
7085       break;
7086 
7087     case ZERO_EXTRACT:
7088       unsignedp = 1;
7089 
7090       /* fall through */
7091 
7092     case SIGN_EXTRACT:
7093       /* If the operand is a CLOBBER, just return it.  */
7094       if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7095 	return XEXP (x, 0);
7096 
7097       if (!CONST_INT_P (XEXP (x, 1))
7098 	  || !CONST_INT_P (XEXP (x, 2))
7099 	  || GET_MODE (XEXP (x, 0)) == VOIDmode)
7100 	return x;
7101 
7102       /* Reject MODEs that aren't scalar integers because turning vector
7103 	 or complex modes into shifts causes problems.  */
7104 
7105       if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
7106 	return x;
7107 
7108       len = INTVAL (XEXP (x, 1));
7109       pos = INTVAL (XEXP (x, 2));
7110 
7111       /* This should stay within the object being extracted, fail otherwise.  */
7112       if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
7113 	return x;
7114 
7115       if (BITS_BIG_ENDIAN)
7116 	pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
7117 
7118       break;
7119 
7120     default:
7121       return x;
7122     }
7123   /* Convert sign extension to zero extension, if we know that the high
7124      bit is not set, as this is easier to optimize.  It will be converted
7125 	 back to a cheaper alternative in make_extraction.  */
7126   if (GET_CODE (x) == SIGN_EXTEND
7127       && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7128 	  && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
7129 		& ~(((unsigned HOST_WIDE_INT)
7130 		      GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
7131 		     >> 1))
7132 	       == 0)))
7133     {
7134       machine_mode mode = GET_MODE (x);
7135       rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7136       rtx temp2 = expand_compound_operation (temp);
7137 
7138       /* Make sure this is a profitable operation.  */
7139       if (set_src_cost (x, mode, optimize_this_for_speed_p)
7140           > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7141        return temp2;
7142       else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7143                > set_src_cost (temp, mode, optimize_this_for_speed_p))
7144        return temp;
7145       else
7146        return x;
7147     }
7148 
7149   /* We can optimize some special cases of ZERO_EXTEND.  */
7150   if (GET_CODE (x) == ZERO_EXTEND)
7151     {
7152       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7153 	 know that the last value didn't have any inappropriate bits
7154 	 set.  */
7155       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7156 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7157 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7158 	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
7159 	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7160 	return XEXP (XEXP (x, 0), 0);
7161 
7162       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
7163       if (GET_CODE (XEXP (x, 0)) == SUBREG
7164 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7165 	  && subreg_lowpart_p (XEXP (x, 0))
7166 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7167 	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
7168 	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7169 	return SUBREG_REG (XEXP (x, 0));
7170 
7171       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7172 	 is a comparison and STORE_FLAG_VALUE permits.  This is like
7173 	 the first case, but it works even when GET_MODE (x) is larger
7174 	 than HOST_WIDE_INT.  */
7175       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7176 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7177 	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7178 	  && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7179 	      <= HOST_BITS_PER_WIDE_INT)
7180 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7181 	return XEXP (XEXP (x, 0), 0);
7182 
7183       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
7184       if (GET_CODE (XEXP (x, 0)) == SUBREG
7185 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7186 	  && subreg_lowpart_p (XEXP (x, 0))
7187 	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7188 	  && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7189 	      <= HOST_BITS_PER_WIDE_INT)
7190 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7191 	return SUBREG_REG (XEXP (x, 0));
7192 
7193     }
7194 
7195   /* If we reach here, we want to return a pair of shifts.  The inner
7196      shift is a left shift of BITSIZE - POS - LEN bits.  The outer
7197      shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
7198      logical depending on the value of UNSIGNEDP.
7199 
7200      If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7201      converted into an AND of a shift.
7202 
7203      We must check for the case where the left shift would have a negative
7204      count.  This can happen in a case like (x >> 31) & 255 on machines
7205      that can't shift by a constant.  On those machines, we would first
7206      combine the shift with the AND to produce a variable-position
7207      extraction.  Then the constant of 31 would be substituted in
7208      to produce such a position.  */
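  /* For example, with BITS_BIG_ENDIAN clear,
     (sign_extract:SI X (const_int 8) (const_int 4)) has LEN 8 and POS 4
     and becomes (ashiftrt (ashift X 20) 24); the zero_extract form uses
     lshiftrt instead, which is then rewritten as an AND of a shift.  */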
7209 
7210   modewidth = GET_MODE_PRECISION (GET_MODE (x));
7211   if (modewidth >= pos + len)
7212     {
7213       machine_mode mode = GET_MODE (x);
7214       tem = gen_lowpart (mode, XEXP (x, 0));
7215       if (!tem || GET_CODE (tem) == CLOBBER)
7216 	return x;
7217       tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7218 				  tem, modewidth - pos - len);
7219       tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7220 				  mode, tem, modewidth - len);
7221     }
7222   else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7223     tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
7224 				  simplify_shift_const (NULL_RTX, LSHIFTRT,
7225 							GET_MODE (x),
7226 							XEXP (x, 0), pos),
7227 				  (HOST_WIDE_INT_1U << len) - 1);
7228   else
7229     /* Any other cases we can't handle.  */
7230     return x;
7231 
7232   /* If we couldn't do this for some reason, return the original
7233      expression.  */
7234   if (GET_CODE (tem) == CLOBBER)
7235     return x;
7236 
7237   return tem;
7238 }
7239 
7240 /* X is a SET which contains an assignment of one object into
7241    a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7242    or certain SUBREGS). If possible, convert it into a series of
7243    logical operations.
7244 
7245    We half-heartedly support variable positions, but do not at all
7246    support variable lengths.  */
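/* For the common constant-position case, the loop below rewrites

     (set (zero_extract INNER LEN POS) SRC)

   roughly as

     (set INNER (ior (and (not (ashift MASK POS)) INNER)
		     (ashift (and SRC MASK) POS)))

   where MASK is the constant (1 << LEN) - 1:  the field is first cleared
   and the masked source is then OR'ed into place.  */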
7247 
7248 static const_rtx
7249 expand_field_assignment (const_rtx x)
7250 {
7251   rtx inner;
7252   rtx pos;			/* Always counts from low bit.  */
7253   int len;
7254   rtx mask, cleared, masked;
7255   machine_mode compute_mode;
7256 
7257   /* Loop until we find something we can't simplify.  */
7258   while (1)
7259     {
7260       if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7261 	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7262 	{
7263 	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7264 	  len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7265 	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7266 	}
7267       else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7268 	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7269 	{
7270 	  inner = XEXP (SET_DEST (x), 0);
7271 	  len = INTVAL (XEXP (SET_DEST (x), 1));
7272 	  pos = XEXP (SET_DEST (x), 2);
7273 
7274 	  /* A constant position should stay within the width of INNER.  */
7275 	  if (CONST_INT_P (pos)
7276 	      && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7277 	    break;
7278 
7279 	  if (BITS_BIG_ENDIAN)
7280 	    {
7281 	      if (CONST_INT_P (pos))
7282 		pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7283 			       - INTVAL (pos));
7284 	      else if (GET_CODE (pos) == MINUS
7285 		       && CONST_INT_P (XEXP (pos, 1))
7286 		       && (INTVAL (XEXP (pos, 1))
7287 			   == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7288 		/* If position is ADJUST - X, new position is X.  */
7289 		pos = XEXP (pos, 0);
7290 	      else
7291 		{
7292 		  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7293 		  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7294 					     gen_int_mode (prec - len,
7295 							   GET_MODE (pos)),
7296 					     pos);
7297 		}
7298 	    }
7299 	}
7300 
7301       /* A SUBREG between two modes that occupy the same numbers of words
7302 	 can be done by moving the SUBREG to the source.  */
7303       else if (GET_CODE (SET_DEST (x)) == SUBREG
7304 	       /* We need SUBREGs to compute nonzero_bits properly.  */
7305 	       && nonzero_sign_valid
7306 	       && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7307 		     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7308 		   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7309 			+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7310 	{
7311 	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7312 			   gen_lowpart
7313 			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
7314 			    SET_SRC (x)));
7315 	  continue;
7316 	}
7317       else
7318 	break;
7319 
7320       while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7321 	inner = SUBREG_REG (inner);
7322 
7323       compute_mode = GET_MODE (inner);
7324 
7325       /* Don't attempt bitwise arithmetic on non-scalar integer modes.  */
7326       if (! SCALAR_INT_MODE_P (compute_mode))
7327 	{
7328 	  machine_mode imode;
7329 
7330 	  /* Don't do anything for vector or complex integral types.  */
7331 	  if (! FLOAT_MODE_P (compute_mode))
7332 	    break;
7333 
7334 	  /* Try to find an integral mode to pun with.  */
7335 	  imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
7336 	  if (imode == BLKmode)
7337 	    break;
7338 
7339 	  compute_mode = imode;
7340 	  inner = gen_lowpart (imode, inner);
7341 	}
7342 
7343       /* Compute a mask of LEN bits, if we can do this on the host machine.  */
7344       if (len >= HOST_BITS_PER_WIDE_INT)
7345 	break;
7346 
7347       /* Don't try to compute in too wide unsupported modes.  */
7348       if (!targetm.scalar_mode_supported_p (compute_mode))
7349 	break;
7350 
7351       /* Now compute the equivalent expression.  Make a copy of INNER
7352 	 for the SET_DEST in case it is a MEM into which we will substitute;
7353 	 we don't want shared RTL in that case.  */
7354       mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7355 			   compute_mode);
7356       cleared = simplify_gen_binary (AND, compute_mode,
7357 				     simplify_gen_unary (NOT, compute_mode,
7358 				       simplify_gen_binary (ASHIFT,
7359 							    compute_mode,
7360 							    mask, pos),
7361 				       compute_mode),
7362 				     inner);
7363       masked = simplify_gen_binary (ASHIFT, compute_mode,
7364 				    simplify_gen_binary (
7365 				      AND, compute_mode,
7366 				      gen_lowpart (compute_mode, SET_SRC (x)),
7367 				      mask),
7368 				    pos);
7369 
7370       x = gen_rtx_SET (copy_rtx (inner),
7371 		       simplify_gen_binary (IOR, compute_mode,
7372 					    cleared, masked));
7373     }
7374 
7375   return x;
7376 }
7377 
7378 /* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
7379    it is an RTX that represents the (variable) starting position; otherwise,
7380    POS is the (constant) starting bit position.  Both are counted from the LSB.
7381 
7382    UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7383 
7384    IN_DEST is nonzero if this is a reference in the destination of a SET.
7385    This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
7386    a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7387    be used.
7388 
7389    IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
7390    ZERO_EXTRACT should be built even for bits starting at bit 0.
7391 
7392    MODE is the desired mode of the result (if IN_DEST == 0).
7393 
7394    The result is an RTX for the extraction or NULL_RTX if the target
7395    can't handle it.  */
7396 
7397 static rtx
7398 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7399 		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7400 		 int in_dest, int in_compare)
7401 {
7402   /* This mode describes the size of the storage area
7403      to fetch the overall value from.  Within that, we
7404      ignore the POS lowest bits, etc.  */
7405   machine_mode is_mode = GET_MODE (inner);
7406   machine_mode inner_mode;
7407   machine_mode wanted_inner_mode;
7408   machine_mode wanted_inner_reg_mode = word_mode;
7409   machine_mode pos_mode = word_mode;
7410   machine_mode extraction_mode = word_mode;
7411   machine_mode tmode = mode_for_size (len, MODE_INT, 1);
7412   rtx new_rtx = 0;
7413   rtx orig_pos_rtx = pos_rtx;
7414   HOST_WIDE_INT orig_pos;
7415 
7416   if (pos_rtx && CONST_INT_P (pos_rtx))
7417     pos = INTVAL (pos_rtx), pos_rtx = 0;
7418 
7419   if (GET_CODE (inner) == SUBREG
7420       && subreg_lowpart_p (inner)
7421       && (paradoxical_subreg_p (inner)
7422 	  /* If trying or potentially trying to extract
7423 	     bits outside of is_mode, don't look through
7424 	     non-paradoxical SUBREGs.  See PR82192.  */
7425 	  || (pos_rtx == NULL_RTX
7426 	      && pos + len <= GET_MODE_PRECISION (is_mode))))
7427     {
7428       /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7429 	 consider just the QI as the memory to extract from.
7430 	 The subreg adds or removes high bits; its mode is
7431 	 irrelevant to the meaning of this extraction,
7432 	 since POS and LEN count from the lsb.  */
7433       if (MEM_P (SUBREG_REG (inner)))
7434 	is_mode = GET_MODE (SUBREG_REG (inner));
7435       inner = SUBREG_REG (inner);
7436     }
7437   else if (GET_CODE (inner) == ASHIFT
7438 	   && CONST_INT_P (XEXP (inner, 1))
7439 	   && pos_rtx == 0 && pos == 0
7440 	   && len > UINTVAL (XEXP (inner, 1)))
7441     {
7442       /* We're extracting the least significant bits of an rtx
7443 	 (ashift X (const_int C)), where LEN > C.  Extract the
7444 	 least significant (LEN - C) bits of X, giving an rtx
7445 	 whose mode is MODE, then shift it left C times.  */
7446       new_rtx = make_extraction (mode, XEXP (inner, 0),
7447 			     0, 0, len - INTVAL (XEXP (inner, 1)),
7448 			     unsignedp, in_dest, in_compare);
7449       if (new_rtx != 0)
7450 	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7451     }
7452   else if (GET_CODE (inner) == TRUNCATE
7453 	   /* If trying or potentially trying to extract
7454 	      bits outside of is_mode, don't look through
7455 	      TRUNCATE.  See PR82192.  */
7456 	   && pos_rtx == NULL_RTX
7457 	   && pos + len <= GET_MODE_PRECISION (is_mode))
7458     inner = XEXP (inner, 0);
7459 
7460   inner_mode = GET_MODE (inner);
7461 
7462   /* See if this can be done without an extraction.  We never can if the
7463      width of the field is not the same as that of some integer mode. For
7464      registers, we can only avoid the extraction if the position is at the
7465      low-order bit and this is either not in the destination or we have the
7466      appropriate STRICT_LOW_PART operation available.
7467 
7468      For MEM, we can avoid an extract if the field starts on an appropriate
7469      boundary and we can change the mode of the memory reference.  */
7470 
7471   if (tmode != BLKmode
7472       && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7473 	   && !MEM_P (inner)
7474 	   && (pos == 0 || REG_P (inner))
7475 	   && (inner_mode == tmode
7476 	       || !REG_P (inner)
7477 	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7478 	       || reg_truncated_to_mode (tmode, inner))
7479 	   && (! in_dest
7480 	       || (REG_P (inner)
7481 		   && have_insn_for (STRICT_LOW_PART, tmode))))
7482 	  || (MEM_P (inner) && pos_rtx == 0
7483 	      && (pos
7484 		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7485 		     : BITS_PER_UNIT)) == 0
7486 	      /* We can't do this if we are widening INNER_MODE (it
7487 		 may not be aligned, for one thing).  */
7488 	      && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7489 	      && pos + len <= GET_MODE_PRECISION (is_mode)
7490 	      && (inner_mode == tmode
7491 		  || (! mode_dependent_address_p (XEXP (inner, 0),
7492 						  MEM_ADDR_SPACE (inner))
7493 		      && ! MEM_VOLATILE_P (inner))))))
7494     {
7495       /* If INNER is a MEM, make a new MEM that encompasses just the desired
7496 	 field.  If the original and current mode are the same, we need not
7497 	 adjust the offset.  Otherwise, we do if bytes big endian.
7498 
7499 	 If INNER is not a MEM, get a piece consisting of just the field
7500 	 of interest (in this case POS % BITS_PER_WORD must be 0).  */
7501 
7502       if (MEM_P (inner))
7503 	{
7504 	  HOST_WIDE_INT offset;
7505 
7506 	  /* POS counts from lsb, but make OFFSET count in memory order.  */
7507 	  if (BYTES_BIG_ENDIAN)
7508 	    offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7509 	  else
7510 	    offset = pos / BITS_PER_UNIT;
7511 
7512 	  new_rtx = adjust_address_nv (inner, tmode, offset);
7513 	}
7514       else if (REG_P (inner))
7515 	{
7516 	  if (tmode != inner_mode)
7517 	    {
7518 	      /* We can't call gen_lowpart in a DEST since we
7519 		 always want a SUBREG (see below) and it would sometimes
7520 		 return a new hard register.  */
7521 	      if (pos || in_dest)
7522 		{
7523 		  HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7524 
7525 		  if (WORDS_BIG_ENDIAN
7526 		      && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7527 		    final_word = ((GET_MODE_SIZE (inner_mode)
7528 				   - GET_MODE_SIZE (tmode))
7529 				  / UNITS_PER_WORD) - final_word;
7530 
7531 		  final_word *= UNITS_PER_WORD;
7532 		  if (BYTES_BIG_ENDIAN
7533 		      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7534 		    final_word += (GET_MODE_SIZE (inner_mode)
7535 				   - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7536 
7537 		  /* Avoid creating invalid subregs, for example when
7538 		     simplifying (x>>32)&255.  */
7539 		  if (!validate_subreg (tmode, inner_mode, inner, final_word))
7540 		    return NULL_RTX;
7541 
7542 		  new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7543 		}
7544 	      else
7545 		new_rtx = gen_lowpart (tmode, inner);
7546 	    }
7547 	  else
7548 	    new_rtx = inner;
7549 	}
7550       else
7551 	new_rtx = force_to_mode (inner, tmode,
7552 				 len >= HOST_BITS_PER_WIDE_INT
7553 				 ? HOST_WIDE_INT_M1U
7554 				 : (HOST_WIDE_INT_1U << len) - 1, 0);
7555 
7556       /* If this extraction is going into the destination of a SET,
7557 	 make a STRICT_LOW_PART unless we made a MEM.  */
7558 
7559       if (in_dest)
7560 	return (MEM_P (new_rtx) ? new_rtx
7561 		: (GET_CODE (new_rtx) != SUBREG
7562 		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
7563 		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7564 
7565       if (mode == tmode)
7566 	return new_rtx;
7567 
7568       if (CONST_SCALAR_INT_P (new_rtx))
7569 	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7570 					 mode, new_rtx, tmode);
7571 
7572       /* If we know that no extraneous bits are set, and that the high
7573 	 bit is not set, convert the extraction to the cheaper of
7574 	 sign and zero extension, which are equivalent in these cases.  */
7575       if (flag_expensive_optimizations
7576 	  && (HWI_COMPUTABLE_MODE_P (tmode)
7577 	      && ((nonzero_bits (new_rtx, tmode)
7578 		   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7579 		  == 0)))
7580 	{
7581 	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7582 	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7583 
7584 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7585 	     backends.  */
7586 	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7587 	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7588 	    return temp;
7589 	  return temp1;
7590 	}
7591 
7592       /* Otherwise, sign- or zero-extend unless we already are in the
7593 	 proper mode.  */
7594 
7595       return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7596 			     mode, new_rtx));
7597     }
7598 
7599   /* Unless this is a COMPARE or we have a funny memory reference,
7600      don't do anything with zero-extending field extracts starting at
7601      the low-order bit since they are simple AND operations.  */
7602   if (pos_rtx == 0 && pos == 0 && ! in_dest
7603       && ! in_compare && unsignedp)
7604     return 0;
7605 
7606   /* If INNER is a MEM, reject this if we would be spanning bytes or
7607      if the position is not a constant and the length is not 1.  In all
7608      other cases, we would only be going outside our object in cases when
7609      an original shift would have been undefined.  */
7610   if (MEM_P (inner)
7611       && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7612 	  || (pos_rtx != 0 && len != 1)))
7613     return 0;
7614 
7615   enum extraction_pattern pattern = (in_dest ? EP_insv
7616 				     : unsignedp ? EP_extzv : EP_extv);
7617 
7618   /* If INNER is not from memory, we want it to have the mode of a register
7619      extraction pattern's structure operand, or word_mode if there is no
7620      such pattern.  The same applies to extraction_mode and pos_mode
7621      and their respective operands.
7622 
7623      For memory, assume that the desired extraction_mode and pos_mode
7624      are the same as for a register operation, since at present we don't
7625      have named patterns for aligned memory structures.  */
7626   struct extraction_insn insn;
7627   if (get_best_reg_extraction_insn (&insn, pattern,
7628 				    GET_MODE_BITSIZE (inner_mode), mode))
7629     {
7630       wanted_inner_reg_mode = insn.struct_mode;
7631       pos_mode = insn.pos_mode;
7632       extraction_mode = insn.field_mode;
7633     }
7634 
7635   /* Never narrow an object, since that might not be safe.  */
7636 
7637   if (mode != VOIDmode
7638       && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7639     extraction_mode = mode;
7640 
7641   /* Punt if len is too large for extraction_mode.  */
7642   if (len > GET_MODE_PRECISION (extraction_mode))
7643     return NULL_RTX;
7644 
7645   if (!MEM_P (inner))
7646     wanted_inner_mode = wanted_inner_reg_mode;
7647   else
7648     {
7649       /* Be careful not to go beyond the extracted object and maintain the
7650 	 natural alignment of the memory.  */
7651       wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7652       while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7653 	     > GET_MODE_BITSIZE (wanted_inner_mode))
7654 	{
7655 	  wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7656 	  gcc_assert (wanted_inner_mode != VOIDmode);
7657 	}
7658     }
7659 
7660   orig_pos = pos;
7661 
7662   if (BITS_BIG_ENDIAN)
7663     {
7664       /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7665 	 BITS_BIG_ENDIAN style.  If position is constant, compute new
7666 	 position.  Otherwise, build subtraction.
7667 	 Note that POS is relative to the mode of the original argument.
7668 	 If it's a MEM we need to recompute POS relative to that.
7669 	 However, if we're extracting from (or inserting into) a register,
7670 	 we want to recompute POS relative to wanted_inner_mode.  */
7671       int width = (MEM_P (inner)
7672 		   ? GET_MODE_BITSIZE (is_mode)
7673 		   : GET_MODE_BITSIZE (wanted_inner_mode));
7674 
7675       if (pos_rtx == 0)
7676 	pos = width - len - pos;
7677       else
7678 	pos_rtx
7679 	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
7680 			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
7681 			   pos_rtx);
7682       /* POS may be less than 0 now, but we check for that below.
7683 	 Note that it can only be less than 0 if !MEM_P (inner).  */
7684     }
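  /* An illustrative example (widths assumed for exposition): extracting
     LEN = 8 bits at POS = 4 from a 32-bit register on a BITS_BIG_ENDIAN
     target yields the converted position 32 - 8 - 4 = 20 above.  */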
7685 
7686   /* If INNER has a wider mode, and this is a constant extraction, try to
7687      make it smaller and adjust the byte to point to the byte containing
7688      the value.  */
7689   if (wanted_inner_mode != VOIDmode
7690       && inner_mode != wanted_inner_mode
7691       && ! pos_rtx
7692       && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7693       && MEM_P (inner)
7694       && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7695       && ! MEM_VOLATILE_P (inner))
7696     {
7697       int offset = 0;
7698 
7699       /* The computations below will be correct if the machine is big
7700 	 endian in both bits and bytes or little endian in bits and bytes.
7701 	 If it is mixed, we must adjust.  */
7702 
7703       /* If bytes are big endian and we had a paradoxical SUBREG, we must
7704 	 adjust OFFSET to compensate.  */
7705       if (BYTES_BIG_ENDIAN
7706 	  && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7707 	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7708 
7709       /* We can now move to the desired byte.  */
7710       offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7711 		* GET_MODE_SIZE (wanted_inner_mode);
7712       pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7713 
7714       if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7715 	  && is_mode != wanted_inner_mode)
7716 	offset = (GET_MODE_SIZE (is_mode)
7717 		  - GET_MODE_SIZE (wanted_inner_mode) - offset);
7718 
7719       inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7720     }
7721 
7722   /* If INNER is not memory, get it into the proper mode.  If we are changing
7723      its mode, POS must be a constant and smaller than the size of the new
7724      mode.  */
7725   else if (!MEM_P (inner))
7726     {
7727       /* On the LHS, don't create paradoxical subregs implicitly truncating
7728 	 the register unless TRULY_NOOP_TRUNCATION.  */
7729       if (in_dest
7730 	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7731 					     wanted_inner_mode))
7732 	return NULL_RTX;
7733 
7734       if (GET_MODE (inner) != wanted_inner_mode
7735 	  && (pos_rtx != 0
7736 	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7737 	return NULL_RTX;
7738 
7739       if (orig_pos < 0)
7740 	return NULL_RTX;
7741 
7742       inner = force_to_mode (inner, wanted_inner_mode,
7743 			     pos_rtx
7744 			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7745 			     ? HOST_WIDE_INT_M1U
7746 			     : (((HOST_WIDE_INT_1U << len) - 1)
7747 				<< orig_pos),
7748 			     0);
7749     }
7750 
7751   /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
7752      have to zero extend.  Otherwise, we can just use a SUBREG.  */
7753   if (pos_rtx != 0
7754       && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7755     {
7756       rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7757 				     GET_MODE (pos_rtx));
7758 
7759       /* If we know that no extraneous bits are set, and that the high
7760 	 bit is not set, convert the extraction to the cheaper of
7761 	 SIGN_EXTENSION and ZERO_EXTENSION, which are equivalent in these
7762 	 cases.  */
7763       if (flag_expensive_optimizations
7764 	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7765 	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7766 		   & ~(((unsigned HOST_WIDE_INT)
7767 			GET_MODE_MASK (GET_MODE (pos_rtx)))
7768 		       >> 1))
7769 		  == 0)))
7770 	{
7771 	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7772 					  GET_MODE (pos_rtx));
7773 
7774 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7775 	     backends.  */
7776 	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7777 	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7778 	    temp = temp1;
7779 	}
7780       pos_rtx = temp;
7781     }
7782 
7783   /* Make POS_RTX unless we already have it and it is correct.  If we don't
7784      have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7785      be a CONST_INT.  */
7786   if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7787     pos_rtx = orig_pos_rtx;
7788 
7789   else if (pos_rtx == 0)
7790     pos_rtx = GEN_INT (pos);
7791 
7792   /* Make the required operation.  See if we can use existing rtx.  */
7793   new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7794 			 extraction_mode, inner, GEN_INT (len), pos_rtx);
7795   if (! in_dest)
7796     new_rtx = gen_lowpart (mode, new_rtx);
7797 
7798   return new_rtx;
7799 }
7800 
7801 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7802    with any other operations in X.  Return X without that shift if so.  */
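/* An illustrative example (values assumed for exposition): with COUNT = 3,

     (plus (ashift (reg X) (const_int 3)) (const_int 8))

   becomes (plus (reg X) (const_int 1)); the low three bits of the constant
   are zero, so the whole expression can safely lose the shift.  */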
7803 
7804 static rtx
7805 extract_left_shift (rtx x, int count)
7806 {
7807   enum rtx_code code = GET_CODE (x);
7808   machine_mode mode = GET_MODE (x);
7809   rtx tem;
7810 
7811   switch (code)
7812     {
7813     case ASHIFT:
7814       /* This is the shift itself.  If it is wide enough, we will return
7815 	 either the value being shifted if the shift count is equal to
7816 	 COUNT or a shift for the difference.  */
7817       if (CONST_INT_P (XEXP (x, 1))
7818 	  && INTVAL (XEXP (x, 1)) >= count)
7819 	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7820 				     INTVAL (XEXP (x, 1)) - count);
7821       break;
7822 
7823     case NEG:  case NOT:
7824       if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7825 	return simplify_gen_unary (code, mode, tem, mode);
7826 
7827       break;
7828 
7829     case PLUS:  case IOR:  case XOR:  case AND:
7830       /* If we can safely shift this constant and we find the inner shift,
7831 	 make a new operation.  */
7832       if (CONST_INT_P (XEXP (x, 1))
7833 	  && (UINTVAL (XEXP (x, 1))
7834 	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7835 	  && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7836 	{
7837 	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7838 	  return simplify_gen_binary (code, mode, tem,
7839 				      gen_int_mode (val, mode));
7840 	}
7841       break;
7842 
7843     default:
7844       break;
7845     }
7846 
7847   return 0;
7848 }
7849 
7850 /* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
7851    level of the expression and MODE is its mode.  IN_CODE is as for
7852    make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
7853    that should be used when recursing on operands of *X_PTR.
7854 
7855    There are two possible actions:
7856 
7857    - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
7858      equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7859 
7860    - Return a new rtx, which the caller returns directly.  */
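/* Illustrative rewrites performed below (example values assumed for
   exposition): inside a MEM address, (ashift (reg X) (const_int 2)) is
   returned as (mult (reg X) (const_int 4)), the canonical form for
   addresses; and (and (lshiftrt (reg X) (const_int 3)) (const_int 15))
   is handed to make_extraction to become, typically, a 4-bit ZERO_EXTRACT
   starting at bit 3.  */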
7861 
7862 static rtx
7863 make_compound_operation_int (machine_mode mode, rtx *x_ptr,
7864 			     enum rtx_code in_code,
7865 			     enum rtx_code *next_code_ptr)
7866 {
7867   rtx x = *x_ptr;
7868   enum rtx_code next_code = *next_code_ptr;
7869   enum rtx_code code = GET_CODE (x);
7870   int mode_width = GET_MODE_PRECISION (mode);
7871   rtx rhs, lhs;
7872   rtx new_rtx = 0;
7873   int i;
7874   rtx tem;
7875   bool equality_comparison = false;
7876 
7877   if (in_code == EQ)
7878     {
7879       equality_comparison = true;
7880       in_code = COMPARE;
7881     }
7882 
7883   /* Process depending on the code of this operation.  If NEW_RTX is set
7884      nonzero, it will be returned.  */
7885 
7886   switch (code)
7887     {
7888     case ASHIFT:
7889       /* Convert shifts by constants into multiplications if inside
7890 	 an address.  */
7891       if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7892 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7893 	  && INTVAL (XEXP (x, 1)) >= 0)
7894 	{
7895 	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7896 	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7897 
7898 	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7899 	  if (GET_CODE (new_rtx) == NEG)
7900 	    {
7901 	      new_rtx = XEXP (new_rtx, 0);
7902 	      multval = -multval;
7903 	    }
7904 	  multval = trunc_int_for_mode (multval, mode);
7905 	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7906 	}
7907       break;
7908 
7909     case PLUS:
7910       lhs = XEXP (x, 0);
7911       rhs = XEXP (x, 1);
7912       lhs = make_compound_operation (lhs, next_code);
7913       rhs = make_compound_operation (rhs, next_code);
7914       if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7915 	{
7916 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7917 				     XEXP (lhs, 1));
7918 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7919 	}
7920       else if (GET_CODE (lhs) == MULT
7921 	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7922 	{
7923 	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7924 				     simplify_gen_unary (NEG, mode,
7925 							 XEXP (lhs, 1),
7926 							 mode));
7927 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7928 	}
7929       else
7930 	{
7931 	  SUBST (XEXP (x, 0), lhs);
7932 	  SUBST (XEXP (x, 1), rhs);
7933 	}
7934       maybe_swap_commutative_operands (x);
7935       return x;
7936 
7937     case MINUS:
7938       lhs = XEXP (x, 0);
7939       rhs = XEXP (x, 1);
7940       lhs = make_compound_operation (lhs, next_code);
7941       rhs = make_compound_operation (rhs, next_code);
7942       if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
7943 	{
7944 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7945 				     XEXP (rhs, 1));
7946 	  return simplify_gen_binary (PLUS, mode, tem, lhs);
7947 	}
7948       else if (GET_CODE (rhs) == MULT
7949 	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7950 	{
7951 	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7952 				     simplify_gen_unary (NEG, mode,
7953 							 XEXP (rhs, 1),
7954 							 mode));
7955 	  return simplify_gen_binary (PLUS, mode, tem, lhs);
7956 	}
7957       else
7958 	{
7959 	  SUBST (XEXP (x, 0), lhs);
7960 	  SUBST (XEXP (x, 1), rhs);
7961 	  return x;
7962 	}
7963 
7964     case AND:
7965       /* If the second operand is not a constant, we can't do anything
7966 	 with it.  */
7967       if (!CONST_INT_P (XEXP (x, 1)))
7968 	break;
7969 
7970       /* If the constant is a power of two minus one and the first operand
7971 	 is a logical right shift, make an extraction.  */
7972       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7973 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7974 	{
7975 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7976 	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7977 				 0, in_code == COMPARE);
7978 	}
7979 
7980       /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
7981       else if (GET_CODE (XEXP (x, 0)) == SUBREG
7982 	       && subreg_lowpart_p (XEXP (x, 0))
7983 	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7984 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7985 	{
7986 	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
7987 	  machine_mode inner_mode = GET_MODE (inner_x0);
7988 	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
7989 	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
7990 				     XEXP (inner_x0, 1),
7991 				     i, 1, 0, in_code == COMPARE);
7992 
7993 	  if (new_rtx)
7994 	    {
7995 	      /* If we narrowed the mode when dropping the subreg, then
7996 		 we must zero-extend to keep the semantics of the AND.  */
7997 	      if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
7998 		;
7999 	      else if (SCALAR_INT_MODE_P (inner_mode))
8000 		new_rtx = simplify_gen_unary (ZERO_EXTEND, mode,
8001 					      new_rtx, inner_mode);
8002 	      else
8003 		new_rtx = NULL;
8004 	    }
8005 
8006 	  /* If that didn't give anything, see if the AND simplifies on
8007 	     its own.  */
8008 	  if (!new_rtx && i >= 0)
8009 	    {
8010 	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8011 	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8012 					 0, in_code == COMPARE);
8013 	    }
8014 	}
8015       /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
8016       else if ((GET_CODE (XEXP (x, 0)) == XOR
8017 		|| GET_CODE (XEXP (x, 0)) == IOR)
8018 	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8019 	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8020 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8021 	{
8022 	  /* Apply the distributive law, and then try to make extractions.  */
8023 	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8024 				gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8025 					     XEXP (x, 1)),
8026 				gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8027 					     XEXP (x, 1)));
8028 	  new_rtx = make_compound_operation (new_rtx, in_code);
8029 	}
8030 
8031       /* If we have (and (rotate X C) M) and C is no smaller than the number
8032 	 of bits in M, this is an extraction.  */
8033 
8034       else if (GET_CODE (XEXP (x, 0)) == ROTATE
8035 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8036 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8037 	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8038 	{
8039 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8040 	  new_rtx = make_extraction (mode, new_rtx,
8041 				 (GET_MODE_PRECISION (mode)
8042 				  - INTVAL (XEXP (XEXP (x, 0), 1))),
8043 				 NULL_RTX, i, 1, 0, in_code == COMPARE);
8044 	}
8045 
8046       /* On machines without logical shift insns, if the operand of the AND is
8047 	 a logical shift and our mask turns off all the propagated sign
8048 	 bits, we can replace the logical shift with an arithmetic shift.  */
8049       else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8050 	       && !have_insn_for (LSHIFTRT, mode)
8051 	       && have_insn_for (ASHIFTRT, mode)
8052 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8053 	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8054 	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8055 	       && mode_width <= HOST_BITS_PER_WIDE_INT)
8056 	{
8057 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8058 
8059 	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8060 	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8061 	    SUBST (XEXP (x, 0),
8062 		   gen_rtx_ASHIFTRT (mode,
8063 				     make_compound_operation
8064 				     (XEXP (XEXP (x, 0), 0), next_code),
8065 				     XEXP (XEXP (x, 0), 1)));
8066 	}
8067 
8068       /* If the constant is one less than a power of two, this might be
8069 	 representable by an extraction even if no shift is present.
8070 	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8071 	 we are in a COMPARE.  */
8072       else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8073 	new_rtx = make_extraction (mode,
8074 			       make_compound_operation (XEXP (x, 0),
8075 							next_code),
8076 			       0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8077 
8078       /* If we are in a comparison and this is an AND with a power of two,
8079 	 convert this into the appropriate bit extract.  */
8080       else if (in_code == COMPARE
8081 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8082 	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8083 	new_rtx = make_extraction (mode,
8084 				   make_compound_operation (XEXP (x, 0),
8085 							    next_code),
8086 				   i, NULL_RTX, 1, 1, 0, 1);
8087 
8088       /* If one operand is a paradoxical subreg of a register or memory and
8089 	 the constant (limited to the smaller mode) has only zero bits where
8090 	 the subexpression has known zero bits, this can be expressed as
8091 	 a zero_extend.  */
8092       else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8093 	{
8094 	  rtx sub;
8095 
8096 	  sub = XEXP (XEXP (x, 0), 0);
8097 	  machine_mode sub_mode = GET_MODE (sub);
8098 	  if ((REG_P (sub) || MEM_P (sub))
8099 	      && GET_MODE_PRECISION (sub_mode) < mode_width)
8100 	    {
8101 	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8102 	      unsigned HOST_WIDE_INT mask;
8103 
8104 	      /* Original AND constant with all the known zero bits set.  */
8105 	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8106 	      if ((mask & mode_mask) == mode_mask)
8107 		{
8108 		  new_rtx = make_compound_operation (sub, next_code);
8109 		  new_rtx = make_extraction (mode, new_rtx, 0, 0,
8110 					     GET_MODE_PRECISION (sub_mode),
8111 					     1, 0, in_code == COMPARE);
8112 		}
8113 	    }
8114 	}
8115 
8116       break;
8117 
8118     case LSHIFTRT:
8119       /* If the sign bit is known to be zero, replace this with an
8120 	 arithmetic shift.  */
8121       if (have_insn_for (ASHIFTRT, mode)
8122 	  && ! have_insn_for (LSHIFTRT, mode)
8123 	  && mode_width <= HOST_BITS_PER_WIDE_INT
8124 	  && (nonzero_bits (XEXP (x, 0), mode) & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0)
8125 	{
8126 	  new_rtx = gen_rtx_ASHIFTRT (mode,
8127 				  make_compound_operation (XEXP (x, 0),
8128 							   next_code),
8129 				  XEXP (x, 1));
8130 	  break;
8131 	}
8132 
8133       /* fall through */
8134 
8135     case ASHIFTRT:
8136       lhs = XEXP (x, 0);
8137       rhs = XEXP (x, 1);
8138 
8139       /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8140 	 this is a SIGN_EXTRACT.  */
8141       if (CONST_INT_P (rhs)
8142 	  && GET_CODE (lhs) == ASHIFT
8143 	  && CONST_INT_P (XEXP (lhs, 1))
8144 	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8145 	  && INTVAL (XEXP (lhs, 1)) >= 0
8146 	  && INTVAL (rhs) < mode_width)
8147 	{
8148 	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8149 	  new_rtx = make_extraction (mode, new_rtx,
8150 				 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8151 				 NULL_RTX, mode_width - INTVAL (rhs),
8152 				 code == LSHIFTRT, 0, in_code == COMPARE);
8153 	  break;
8154 	}
8155 
8156       /* See if we have operations between an ASHIFTRT and an ASHIFT.
8157 	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
8158 	 also do this for some cases of SIGN_EXTRACT, but it doesn't
8159 	 seem worth the effort; the case checked for occurs on Alpha.  */
8160 
8161       if (!OBJECT_P (lhs)
8162 	  && ! (GET_CODE (lhs) == SUBREG
8163 		&& (OBJECT_P (SUBREG_REG (lhs))))
8164 	  && CONST_INT_P (rhs)
8165 	  && INTVAL (rhs) >= 0
8166 	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8167 	  && INTVAL (rhs) < mode_width
8168 	  && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
8169 	new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
8170 			       0, NULL_RTX, mode_width - INTVAL (rhs),
8171 			       code == LSHIFTRT, 0, in_code == COMPARE);
8172 
8173       break;
8174 
8175     case SUBREG:
8176       /* Call ourselves recursively on the inner expression.  If we are
8177 	 narrowing the object and it has a different RTL code from
8178 	 what it originally did, do this SUBREG as a force_to_mode.  */
8179       {
8180 	rtx inner = SUBREG_REG (x), simplified;
8181 	enum rtx_code subreg_code = in_code;
8182 
8183 	/* If the SUBREG is a narrowing lowpart of a logical right shift,
8184 	   make an extraction.  */
8185 	if (GET_CODE (inner) == LSHIFTRT
8186 	    && CONST_INT_P (XEXP (inner, 1))
8187 	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8188 	    && (UINTVAL (XEXP (inner, 1))
8189 		< GET_MODE_PRECISION (GET_MODE (inner)))
8190 	    && subreg_lowpart_p (x))
8191 	  {
8192 	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8193 	    int width = GET_MODE_PRECISION (GET_MODE (inner))
8194 			- INTVAL (XEXP (inner, 1));
8195 	    if (width > mode_width)
8196 	      width = mode_width;
8197 	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8198 				       width, 1, 0, in_code == COMPARE);
8199 	    break;
8200 	  }
8201 
8202 	/* If in_code is COMPARE, it isn't always safe to pass it through
8203 	   to the recursive make_compound_operation call.  */
8204 	if (subreg_code == COMPARE
8205 	    && (!subreg_lowpart_p (x)
8206 		|| GET_CODE (inner) == SUBREG
8207 		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8208 		   is (const_int 0), rather than
8209 		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8210 		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8211 		   for non-equality comparisons against 0 is not equivalent
8212 		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
8213 		|| (GET_CODE (inner) == AND
8214 		    && CONST_INT_P (XEXP (inner, 1))
8215 		    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8216 		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
8217 		       >= GET_MODE_BITSIZE (mode) - 1)))
8218 	  subreg_code = SET;
8219 
8220 	tem = make_compound_operation (inner, subreg_code);
8221 
8222 	simplified
8223 	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8224 	if (simplified)
8225 	  tem = simplified;
8226 
8227 	if (GET_CODE (tem) != GET_CODE (inner)
8228 	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8229 	    && subreg_lowpart_p (x))
8230 	  {
8231 	    rtx newer
8232 	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8233 
8234 	    /* If we have something other than a SUBREG, we might have
8235 	       done an expansion, so rerun ourselves.  */
8236 	    if (GET_CODE (newer) != SUBREG)
8237 	      newer = make_compound_operation (newer, in_code);
8238 
8239 	    /* force_to_mode can expand compounds.  If it just re-expanded the
8240 	       compound, use gen_lowpart to convert to the desired mode.  */
8241 	    if (rtx_equal_p (newer, x)
8242 		/* Likewise if it re-expanded the compound only partially.
8243 		   This happens for SUBREG of ZERO_EXTRACT if they extract
8244 		   the same number of bits.  */
8245 		|| (GET_CODE (newer) == SUBREG
8246 		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8247 			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8248 		    && GET_CODE (inner) == AND
8249 		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8250 	      return gen_lowpart (GET_MODE (x), tem);
8251 
8252 	    return newer;
8253 	  }
8254 
8255 	if (simplified)
8256 	  return tem;
8257       }
8258       break;
8259 
8260     default:
8261       break;
8262     }
8263 
8264   if (new_rtx)
8265     *x_ptr = gen_lowpart (mode, new_rtx);
8266   *next_code_ptr = next_code;
8267   return NULL_RTX;
8268 }
8269 
8270 /* Look at the expression rooted at X.  Look for expressions
8271    equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8272    Form these expressions.
8273 
8274    Return the new rtx, usually just X.
8275 
8276    Also, for machines like the VAX that don't have logical shift insns,
8277    try to convert logical to arithmetic shift operations in cases where
8278    they are equivalent.  This undoes the canonicalizations to logical
8279    shifts done elsewhere.
8280 
8281    We try, as much as possible, to re-use rtl expressions to save memory.
8282 
8283    IN_CODE says what kind of expression we are processing.  Normally, it is
8284    SET.  In a memory address it is MEM.  When processing the arguments of
8285    a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8286    precisely it is an equality comparison against zero.  */
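/* An illustrative example (modes assumed for exposition): on a 32-bit
   target,

     (ashiftrt:SI (ashift:SI (reg X) (const_int 24)) (const_int 24))

   is recognized as a signed extraction of the low 8 bits; depending on
   what the target supports, this typically comes back as
   (sign_extend:SI (subreg:QI (reg X) 0)) or as a SIGN_EXTRACT.  */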
8287 
8288 rtx
8289 make_compound_operation (rtx x, enum rtx_code in_code)
8290 {
8291   enum rtx_code code = GET_CODE (x);
8292   const char *fmt;
8293   int i, j;
8294   enum rtx_code next_code;
8295   rtx new_rtx, tem;
8296 
8297   /* Select the code to be used in recursive calls.  Once we are inside an
8298      address, we stay there.  If we have a comparison, set to COMPARE,
8299      but once inside, go back to our default of SET.  */
8300 
8301   next_code = (code == MEM ? MEM
8302 	       : ((code == COMPARE || COMPARISON_P (x))
8303 		  && XEXP (x, 1) == const0_rtx) ? COMPARE
8304 	       : in_code == COMPARE || in_code == EQ ? SET : in_code);
8305 
8306   if (SCALAR_INT_MODE_P (GET_MODE (x)))
8307     {
8308       rtx new_rtx = make_compound_operation_int (GET_MODE (x), &x,
8309 						 in_code, &next_code);
8310       if (new_rtx)
8311 	return new_rtx;
8312       code = GET_CODE (x);
8313     }
8314 
8315   /* Now recursively process each operand of this operation.  We need to
8316      handle ZERO_EXTEND specially so that we don't lose track of the
8317      inner mode.  */
8318   if (code == ZERO_EXTEND)
8319     {
8320       new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8321       tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8322 					    new_rtx, GET_MODE (XEXP (x, 0)));
8323       if (tem)
8324 	return tem;
8325       SUBST (XEXP (x, 0), new_rtx);
8326       return x;
8327     }
8328 
8329   fmt = GET_RTX_FORMAT (code);
8330   for (i = 0; i < GET_RTX_LENGTH (code); i++)
8331     if (fmt[i] == 'e')
8332       {
8333 	new_rtx = make_compound_operation (XEXP (x, i), next_code);
8334 	SUBST (XEXP (x, i), new_rtx);
8335       }
8336     else if (fmt[i] == 'E')
8337       for (j = 0; j < XVECLEN (x, i); j++)
8338 	{
8339 	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8340 	  SUBST (XVECEXP (x, i, j), new_rtx);
8341 	}
8342 
8343   maybe_swap_commutative_operands (x);
8344   return x;
8345 }
8346 
8347 /* Given M see if it is a value that would select a field of bits
8348 /* Given M, see if it is a value that would select a field of bits
8349    Otherwise, return the starting position of the field, where 0 is the
8350    low-order bit.
8351 
8352    *PLEN is set to the length of the field.  */
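/* Illustrative examples (mask values assumed for exposition): for
   M = 0x000000f0 this returns 4 with *PLEN set to 4 (a 4-bit field
   starting at bit 4); for M = 0x000000a0 the set bits are not
   contiguous, so it returns -1.  */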
8353 
8354 static int
8355 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8356 {
8357   /* Get the bit number of the first 1 bit from the right, -1 if none.  */
8358   int pos = m ? ctz_hwi (m) : -1;
8359   int len = 0;
8360 
8361   if (pos >= 0)
8362     /* Now shift off the low-order zero bits and see if we have a
8363        power of two minus 1.  */
8364     len = exact_log2 ((m >> pos) + 1);
8365 
8366   if (len <= 0)
8367     pos = -1;
8368 
8369   *plen = len;
8370   return pos;
8371 }
8372 
8373 /* If X refers to a register that equals REG in value, replace these
8374    references with REG.  */
8375 static rtx
8376 canon_reg_for_combine (rtx x, rtx reg)
8377 {
8378   rtx op0, op1, op2;
8379   const char *fmt;
8380   int i;
8381   bool copied;
8382 
8383   enum rtx_code code = GET_CODE (x);
8384   switch (GET_RTX_CLASS (code))
8385     {
8386     case RTX_UNARY:
8387       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8388       if (op0 != XEXP (x, 0))
8389 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8390 				   GET_MODE (reg));
8391       break;
8392 
8393     case RTX_BIN_ARITH:
8394     case RTX_COMM_ARITH:
8395       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8396       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8397       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8398 	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8399       break;
8400 
8401     case RTX_COMPARE:
8402     case RTX_COMM_COMPARE:
8403       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8404       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8405       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8406 	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8407 					GET_MODE (op0), op0, op1);
8408       break;
8409 
8410     case RTX_TERNARY:
8411     case RTX_BITFIELD_OPS:
8412       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8413       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8414       op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8415       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8416 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8417 				     GET_MODE (op0), op0, op1, op2);
8418       /* FALLTHRU */
8419 
8420     case RTX_OBJ:
8421       if (REG_P (x))
8422 	{
8423 	  if (rtx_equal_p (get_last_value (reg), x)
8424 	      || rtx_equal_p (reg, get_last_value (x)))
8425 	    return reg;
8426 	  else
8427 	    break;
8428 	}
8429 
8430       /* fall through */
8431 
8432     default:
8433       fmt = GET_RTX_FORMAT (code);
8434       copied = false;
8435       for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8436 	if (fmt[i] == 'e')
8437 	  {
8438 	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8439 	    if (op != XEXP (x, i))
8440 	      {
8441 		if (!copied)
8442 		  {
8443 		    copied = true;
8444 		    x = copy_rtx (x);
8445 		  }
8446 		XEXP (x, i) = op;
8447 	      }
8448 	  }
8449 	else if (fmt[i] == 'E')
8450 	  {
8451 	    int j;
8452 	    for (j = 0; j < XVECLEN (x, i); j++)
8453 	      {
8454 		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8455 		if (op != XVECEXP (x, i, j))
8456 		  {
8457 		    if (!copied)
8458 		      {
8459 			copied = true;
8460 			x = copy_rtx (x);
8461 		      }
8462 		    XVECEXP (x, i, j) = op;
8463 		  }
8464 	      }
8465 	  }
8466 
8467       break;
8468     }
8469 
8470   return x;
8471 }
8472 
8473 /* Return X converted to MODE.  If the value is already truncated to
8474    MODE we can just return a subreg even though in the general case we
8475    would need an explicit truncation.  */
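/* An illustrative example (modes assumed for exposition): narrowing
   (reg:DI X) to SImode normally yields (subreg:SI (reg:DI X) 0)
   (byte offset 4 on big-endian targets); only when the truncation is
   not a no-op on the target and X is not known to be already truncated
   do we build an explicit (truncate:SI (reg:DI X)).  */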
8476 
8477 static rtx
8478 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8479 {
8480   if (!CONST_INT_P (x)
8481       && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
8482       && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8483       && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8484     {
8485       /* Bit-cast X into an integer mode.  */
8486       if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8487 	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
8488       x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
8489 			      x, GET_MODE (x));
8490     }
8491 
8492   return gen_lowpart (mode, x);
8493 }
8494 
8495 /* See if X can be simplified knowing that we will only refer to it in
8496    MODE and will only refer to those bits that are nonzero in MASK.
8497    If other bits are being computed or if masking operations are done
8498    that select a superset of the bits in MASK, they can sometimes be
8499    ignored.
8500 
8501    Return a possibly simplified expression, but always convert X to
8502    MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.
8503 
8504    If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8505    are all off in X.  This is used when X will be complemented, by either
8506    NOT, NEG, or XOR.  */
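/* An illustrative example (values assumed for exposition): with
   MODE = SImode and MASK = 0xf,

     (and:SI (reg X) (const_int 255))

   reduces to (reg X): the AND constant is first narrowed to 0xf, and an
   AND whose constant equals MASK is redundant because the caller has
   promised to look only at those bits.  */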
8507 
8508 static rtx
8509 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8510 	       int just_select)
8511 {
8512   enum rtx_code code = GET_CODE (x);
8513   int next_select = just_select || code == XOR || code == NOT || code == NEG;
8514   machine_mode op_mode;
8515   unsigned HOST_WIDE_INT fuller_mask, nonzero;
8516   rtx op0, op1, temp;
8517 
8518   /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
8519      code below will do the wrong thing since the mode of such an
8520      expression is VOIDmode.
8521 
8522      Also do nothing if X is a CLOBBER; this can happen if X was
8523      the return value from a call to gen_lowpart.  */
8524   if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8525     return x;
8526 
8527   /* We want to perform the operation in its present mode unless we know
8528      that the operation is valid in MODE, in which case we do the operation
8529      in MODE.  */
8530   op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8531 	      && have_insn_for (code, mode))
8532 	     ? mode : GET_MODE (x));
8533 
8534   /* It is not valid to do a right-shift in a narrower mode
8535      than the one it came in with.  */
8536   if ((code == LSHIFTRT || code == ASHIFTRT)
8537       && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
8538     op_mode = GET_MODE (x);
8539 
8540   /* Truncate MASK to fit OP_MODE.  */
8541   if (op_mode)
8542     mask &= GET_MODE_MASK (op_mode);
8543 
8544   /* When we have an arithmetic operation, or a shift whose count we
8545      do not know, we need to assume that all bits up to the highest-order
8546      bit in MASK will be needed.  This is how we form such a mask.  */
8547   if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8548     fuller_mask = HOST_WIDE_INT_M1U;
8549   else
8550     fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8551 		   - 1);
8552 
8553   /* Determine what bits of X are guaranteed to be (non)zero.  */
8554   nonzero = nonzero_bits (x, mode);
8555 
8556   /* If none of the bits in X are needed, return a zero.  */
8557   if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8558     x = const0_rtx;
8559 
8560   /* If X is a CONST_INT, return a new one.  Do this here since the
8561      test below will fail.  */
8562   if (CONST_INT_P (x))
8563     {
8564       if (SCALAR_INT_MODE_P (mode))
8565 	return gen_int_mode (INTVAL (x) & mask, mode);
8566       else
8567 	{
8568 	  x = GEN_INT (INTVAL (x) & mask);
8569 	  return gen_lowpart_common (mode, x);
8570 	}
8571     }
8572 
8573   /* If X is narrower than MODE and we want all the bits in X's mode, just
8574      get X in the proper mode.  */
8575   if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8576       && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8577     return gen_lowpart (mode, x);
8578 
8579   /* We can ignore the effect of a SUBREG if it narrows the mode or
8580      if the constant masks to zero all the bits the mode doesn't have.  */
8581   if (GET_CODE (x) == SUBREG
8582       && subreg_lowpart_p (x)
8583       && ((GET_MODE_SIZE (GET_MODE (x))
8584 	   < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8585 	  || (0 == (mask
8586 		    & GET_MODE_MASK (GET_MODE (x))
8587 		    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8588     return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8589 
8590   /* The arithmetic simplifications here only work for scalar integer modes.  */
8591   if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8592     return gen_lowpart_or_truncate (mode, x);
8593 
8594   switch (code)
8595     {
8596     case CLOBBER:
8597       /* If X is a (clobber (const_int)), return it since we know we are
8598 	 generating something that won't match.  */
8599       return x;
8600 
8601     case SIGN_EXTEND:
8602     case ZERO_EXTEND:
8603     case ZERO_EXTRACT:
8604     case SIGN_EXTRACT:
8605       x = expand_compound_operation (x);
8606       if (GET_CODE (x) != code)
8607 	return force_to_mode (x, mode, mask, next_select);
8608       break;
8609 
8610     case TRUNCATE:
8611       /* Similarly for a truncate.  */
8612       return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8613 
8614     case AND:
8615       /* If this is an AND with a constant, convert it into an AND
8616 	 whose constant is the AND of that constant with MASK.  If it
8617 	 remains an AND of MASK, delete it since it is redundant.  */
8618 
8619       if (CONST_INT_P (XEXP (x, 1)))
8620 	{
8621 	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8622 				      mask & INTVAL (XEXP (x, 1)));
8623 
8624 	  /* If X is still an AND, see if it is an AND with a mask that
8625 	     is just some low-order bits.  If so, and it is MASK, we don't
8626 	     need it.  */
8627 
8628 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8629 	      && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8630 		  == mask))
8631 	    x = XEXP (x, 0);
8632 
8633 	  /* If it remains an AND, try making another AND with the bits
8634 	     in the mode mask that aren't in MASK turned on.  If the
8635 	     constant in the AND is wide enough, this might make a
8636 	     cheaper constant.  */
8637 
8638 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8639 	      && GET_MODE_MASK (GET_MODE (x)) != mask
8640 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8641 	    {
8642 	      unsigned HOST_WIDE_INT cval
8643 		= UINTVAL (XEXP (x, 1))
8644 		  | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8645 	      rtx y;
8646 
8647 	      y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
8648 				       gen_int_mode (cval, GET_MODE (x)));
8649 	      if (set_src_cost (y, GET_MODE (x), optimize_this_for_speed_p)
8650 	          < set_src_cost (x, GET_MODE (x), optimize_this_for_speed_p))
8651 		x = y;
8652 	    }
8653 
8654 	  break;
8655 	}
8656 
8657       goto binop;
8658 
8659     case PLUS:
8660       /* In (and (plus FOO C1) M), if M is a mask that just turns off
8661 	 low-order bits (as in an alignment operation) and FOO is already
8662 	 aligned to that boundary, mask C1 to that boundary as well.
8663 	 This may eliminate that PLUS and, later, the AND.  */
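      /* An illustrative example (values assumed for exposition): with
	 MASK = 0xfffffff8 (an 8-byte alignment mask) and FOO known to
	 have its low three bits clear, (plus FOO (const_int 11)) is
	 retried with the constant masked down to 8, since the discarded
	 low bits of the addend can never carry into the bits the caller
	 will look at.  */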
8664 
8665       {
8666 	unsigned int width = GET_MODE_PRECISION (mode);
8667 	unsigned HOST_WIDE_INT smask = mask;
8668 
8669 	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8670 	   number, sign extend it.  */
8671 
8672 	if (width < HOST_BITS_PER_WIDE_INT
8673 	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8674 	  smask |= HOST_WIDE_INT_M1U << width;
8675 
8676 	if (CONST_INT_P (XEXP (x, 1))
8677 	    && pow2p_hwi (- smask)
8678 	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8679 	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8680 	  return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8681 					       (INTVAL (XEXP (x, 1)) & smask)),
8682 				mode, smask, next_select);
8683       }
8684 
8685       /* fall through */
8686 
8687     case MULT:
8688       /* Substituting into the operands of a widening MULT is not likely to
8689 	 create RTL matching a machine insn.  */
8690       if (code == MULT
8691 	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8692 	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8693 	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8694 	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8695 	  && REG_P (XEXP (XEXP (x, 0), 0))
8696 	  && REG_P (XEXP (XEXP (x, 1), 0)))
8697 	return gen_lowpart_or_truncate (mode, x);
8698 
8699       /* For PLUS, MINUS and MULT, we need all bits less significant than the
8700 	 most significant bit in MASK since carries from those bits will
8701 	 affect the bits we are interested in.  */
8702       mask = fuller_mask;
8703       goto binop;
8704 
8705     case MINUS:
8706       /* If X is (minus C Y) where C's least set bit is larger than any bit
8707 	 in the mask, then we may replace with (neg Y).  */
8708       if (CONST_INT_P (XEXP (x, 0))
8709 	  && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8710 	{
8711 	  x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8712 				  GET_MODE (x));
8713 	  return force_to_mode (x, mode, mask, next_select);
8714 	}
8715 
8716       /* Similarly, if C contains every bit in the fuller_mask, then we may
8717 	 replace with (not Y).  */
8718       if (CONST_INT_P (XEXP (x, 0))
8719 	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8720 	{
8721 	  x = simplify_gen_unary (NOT, GET_MODE (x),
8722 				  XEXP (x, 1), GET_MODE (x));
8723 	  return force_to_mode (x, mode, mask, next_select);
8724 	}
8725 
8726       mask = fuller_mask;
8727       goto binop;
8728 
8729     case IOR:
8730     case XOR:
8731       /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8732 	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8733 	 operation which may be a bitfield extraction.  Ensure that the
8734 	 constant we form is not wider than the mode of X.  */
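      /* An illustrative example (values assumed for exposition; subject to
	 the width and nonzero-bits checks below):

	   (ior:SI (lshiftrt:SI FOO (const_int 8)) (const_int 3))

	 can become

	   (lshiftrt:SI (ior:SI FOO (const_int 0x300)) (const_int 8))

	 pushing the constant back under the shift so that a surrounding
	 AND can later be recognized as a single bit-field extraction.  */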
8735 
8736       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8737 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8738 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8739 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8740 	  && CONST_INT_P (XEXP (x, 1))
8741 	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
8742 	       + floor_log2 (INTVAL (XEXP (x, 1))))
8743 	      < GET_MODE_PRECISION (GET_MODE (x)))
8744 	  && (UINTVAL (XEXP (x, 1))
8745 	      & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8746 	{
8747 	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8748 			       << INTVAL (XEXP (XEXP (x, 0), 1)),
8749 			       GET_MODE (x));
8750 	  temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8751 				      XEXP (XEXP (x, 0), 0), temp);
8752 	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8753 				   XEXP (XEXP (x, 0), 1));
8754 	  return force_to_mode (x, mode, mask, next_select);
8755 	}
8756 
8757     binop:
8758       /* For most binary operations, just propagate into the operation and
8759 	 change the mode if we have an operation of that mode.  */
8760 
8761       op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8762       op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8763 
8764       /* If we ended up truncating both operands, truncate the result of the
8765 	 operation instead.  */
8766       if (GET_CODE (op0) == TRUNCATE
8767 	  && GET_CODE (op1) == TRUNCATE)
8768 	{
8769 	  op0 = XEXP (op0, 0);
8770 	  op1 = XEXP (op1, 0);
8771 	}
8772 
8773       op0 = gen_lowpart_or_truncate (op_mode, op0);
8774       op1 = gen_lowpart_or_truncate (op_mode, op1);
8775 
8776       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8777 	x = simplify_gen_binary (code, op_mode, op0, op1);
8778       break;
8779 
8780     case ASHIFT:
8781       /* For left shifts, do the same, but just for the first operand.
8782 	 However, we cannot do anything with shifts where we cannot
8783 	 guarantee that the counts are smaller than the size of the mode
8784 	 because such a count will have a different meaning in a
8785 	 wider mode.  */
8786 
8787       if (! (CONST_INT_P (XEXP (x, 1))
8788 	     && INTVAL (XEXP (x, 1)) >= 0
8789 	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8790 	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8791 		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8792 		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8793 	break;
8794 
8795       /* If the shift count is a constant and we can do arithmetic in
8796 	 the mode of the shift, refine which bits we need.  Otherwise, use the
8797 	 conservative form of the mask.  */
8798       if (CONST_INT_P (XEXP (x, 1))
8799 	  && INTVAL (XEXP (x, 1)) >= 0
8800 	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8801 	  && HWI_COMPUTABLE_MODE_P (op_mode))
8802 	mask >>= INTVAL (XEXP (x, 1));
8803       else
8804 	mask = fuller_mask;
8805 
8806       op0 = gen_lowpart_or_truncate (op_mode,
8807 				     force_to_mode (XEXP (x, 0), mode,
8808 						    mask, next_select));
8809 
8810       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8811 	x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8812       break;
8813 
8814     case LSHIFTRT:
8815       /* Here we can only do something if the shift count is a constant,
8816 	 this shift constant is valid for the host, and we can do arithmetic
8817 	 in OP_MODE.  */
8818 
8819       if (CONST_INT_P (XEXP (x, 1))
8820 	  && INTVAL (XEXP (x, 1)) >= 0
8821 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8822 	  && HWI_COMPUTABLE_MODE_P (op_mode))
8823 	{
8824 	  rtx inner = XEXP (x, 0);
8825 	  unsigned HOST_WIDE_INT inner_mask;
8826 
8827 	  /* Select the mask of the bits we need for the shift operand.  */
8828 	  inner_mask = mask << INTVAL (XEXP (x, 1));
8829 
8830 	  /* We can only change the mode of the shift if we can do arithmetic
8831 	     in the mode of the shift and INNER_MASK is no wider than the
8832 	     width of X's mode.  */
8833 	  if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8834 	    op_mode = GET_MODE (x);
8835 
8836 	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8837 
8838 	  if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8839 	    x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8840 	}
8841 
8842       /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8843 	 shift and AND produces only copies of the sign bit (C2 is one less
8844 	 than a power of two), we can do this with just a shift.  */
8845 
8846       if (GET_CODE (x) == LSHIFTRT
8847 	  && CONST_INT_P (XEXP (x, 1))
8848 	  /* The shift puts one of the sign bit copies in the least significant
8849 	     bit.  */
8850 	  && ((INTVAL (XEXP (x, 1))
8851 	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8852 	      >= GET_MODE_PRECISION (GET_MODE (x)))
8853 	  && pow2p_hwi (mask + 1)
8854 	  /* Number of bits left after the shift must be more than the mask
8855 	     needs.  */
8856 	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8857 	      <= GET_MODE_PRECISION (GET_MODE (x)))
8858 	  /* Must be more sign bit copies than the mask needs.  */
8859 	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8860 	      >= exact_log2 (mask + 1)))
8861 	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8862 				 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8863 					  - exact_log2 (mask + 1)));
8864 
8865       goto shiftrt;
8866 
8867     case ASHIFTRT:
8868       /* If we are just looking for the sign bit, we don't need this shift at
8869 	 all, even if it has a variable count.  */
8870       if (val_signbit_p (GET_MODE (x), mask))
8871 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8872 
8873       /* If this is a shift by a constant, get a mask that contains those bits
8874 	 that are not copies of the sign bit.  We then have two cases:  If
8875 	 MASK only includes those bits, this can be a logical shift, which may
8876 	 allow simplifications.  If MASK is a single-bit field not within
8877 	 those bits, we are requesting a copy of the sign bit and hence can
8878 	 shift the sign bit to the appropriate location.  */
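      /* E.g. in a 32-bit mode, (ashiftrt X 24) under MASK 0xff differs
	 from (lshiftrt X 24) only in bits above bit 7, so the logical
	 shift suffices; under MASK 0x100 we only want a copy of the sign
	 bit, which (lshiftrt X 23) places at bit 8.  */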
8879 
8880       if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8881 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8882 	{
8883 	  int i;
8884 
8885 	  /* If the considered data is wider than HOST_WIDE_INT, we can't
8886 	     represent a mask for all its bits in a single scalar.
8887 	     But we only care about the lower bits, so calculate these.  */
8888 
8889 	  if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8890 	    {
8891 	      nonzero = HOST_WIDE_INT_M1U;
8892 
8893 	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8894 		 is the number of bits a full-width mask would have set.
8895 		 We need only shift if these are fewer bits than NONZERO can
8896 		 hold.  If not, we must keep all bits set in NONZERO.  */
8897 
8898 	      if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8899 		  < HOST_BITS_PER_WIDE_INT)
8900 		nonzero >>= INTVAL (XEXP (x, 1))
8901 			    + HOST_BITS_PER_WIDE_INT
8902 			    - GET_MODE_PRECISION (GET_MODE (x));
8903 	    }
8904 	  else
8905 	    {
8906 	      nonzero = GET_MODE_MASK (GET_MODE (x));
8907 	      nonzero >>= INTVAL (XEXP (x, 1));
8908 	    }
8909 
8910 	  if ((mask & ~nonzero) == 0)
8911 	    {
8912 	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8913 					XEXP (x, 0), INTVAL (XEXP (x, 1)));
8914 	      if (GET_CODE (x) != ASHIFTRT)
8915 		return force_to_mode (x, mode, mask, next_select);
8916 	    }
8917 
8918 	  else if ((i = exact_log2 (mask)) >= 0)
8919 	    {
8920 	      x = simplify_shift_const
8921 		  (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8922 		   GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8923 
8924 	      if (GET_CODE (x) != ASHIFTRT)
8925 		return force_to_mode (x, mode, mask, next_select);
8926 	    }
8927 	}
8928 
8929       /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
8930 	 even if the shift count isn't a constant.  */
8931       if (mask == 1)
8932 	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8933 				 XEXP (x, 0), XEXP (x, 1));
8934 
8935     shiftrt:
8936 
8937       /* If this is a zero- or sign-extension operation that just affects bits
8938 	 we don't care about, remove it.  Be sure the call above returned
8939 	 something that is still a shift.  */
8940 
8941       if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8942 	  && CONST_INT_P (XEXP (x, 1))
8943 	  && INTVAL (XEXP (x, 1)) >= 0
8944 	  && (INTVAL (XEXP (x, 1))
8945 	      <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8946 	  && GET_CODE (XEXP (x, 0)) == ASHIFT
8947 	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8948 	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8949 			      next_select);
8950 
8951       break;
8952 
8953     case ROTATE:
8954     case ROTATERT:
8955       /* If the shift count is constant and we can do computations
8956 	 in the mode of X, compute where the bits we care about are.
8957 	 Otherwise, we can't do anything.  Don't change the mode of
8958 	 the shift or propagate MODE into the shift, though.  */
8959       if (CONST_INT_P (XEXP (x, 1))
8960 	  && INTVAL (XEXP (x, 1)) >= 0)
8961 	{
8962 	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8963 					    GET_MODE (x),
8964 					    gen_int_mode (mask, GET_MODE (x)),
8965 					    XEXP (x, 1));
8966 	  if (temp && CONST_INT_P (temp))
8967 	    x = simplify_gen_binary (code, GET_MODE (x),
8968 				     force_to_mode (XEXP (x, 0), GET_MODE (x),
8969 						    INTVAL (temp), next_select),
8970 				     XEXP (x, 1));
8971 	}
8972       break;
8973 
8974     case NEG:
8975       /* If we just want the low-order bit, the NEG isn't needed since it
8976 	 won't change the low-order bit.  */
8977       if (mask == 1)
8978 	return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8979 
8980       /* We need any bits less significant than the most significant bit in
8981 	 MASK since carries from those bits will affect the bits we are
8982 	 interested in.  */
8983       mask = fuller_mask;
8984       goto unop;
8985 
8986     case NOT:
8987       /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8988 	 same as the XOR case above.  Ensure that the constant we form is not
8989 	 wider than the mode of X.  */
8990 
8991       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8992 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8993 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8994 	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8995 	      < GET_MODE_PRECISION (GET_MODE (x)))
8996 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8997 	{
8998 	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8999 			       GET_MODE (x));
9000 	  temp = simplify_gen_binary (XOR, GET_MODE (x),
9001 				      XEXP (XEXP (x, 0), 0), temp);
9002 	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
9003 				   temp, XEXP (XEXP (x, 0), 1));
9004 
9005 	  return force_to_mode (x, mode, mask, next_select);
9006 	}
9007 
9008       /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9009 	 use the full mask inside the NOT.  */
9010       mask = fuller_mask;
9011 
9012     unop:
9013       op0 = gen_lowpart_or_truncate (op_mode,
9014 				     force_to_mode (XEXP (x, 0), mode, mask,
9015 						    next_select));
9016       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
9017 	x = simplify_gen_unary (code, op_mode, op0, op_mode);
9018       break;
9019 
9020     case NE:
9021       /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9022 	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9023 	 which is equal to STORE_FLAG_VALUE.  */
9024       if ((mask & ~STORE_FLAG_VALUE) == 0
9025 	  && XEXP (x, 1) == const0_rtx
9026 	  && GET_MODE (XEXP (x, 0)) == mode
9027 	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9028 	  && (nonzero_bits (XEXP (x, 0), mode)
9029 	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9030 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9031 
9032       break;
9033 
9034     case IF_THEN_ELSE:
9035       /* We have no way of knowing if the IF_THEN_ELSE can itself be
9036 	 written in a narrower mode.  We play it safe and do not do so.  */
9037 
9038       op0 = gen_lowpart_or_truncate (GET_MODE (x),
9039 				     force_to_mode (XEXP (x, 1), mode,
9040 						    mask, next_select));
9041       op1 = gen_lowpart_or_truncate (GET_MODE (x),
9042 				     force_to_mode (XEXP (x, 2), mode,
9043 						    mask, next_select));
9044       if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9045 	x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
9046 				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9047 				  op0, op1);
9048       break;
9049 
9050     default:
9051       break;
9052     }
9053 
9054   /* Ensure we return a value of the proper mode.  */
9055   return gen_lowpart_or_truncate (mode, x);
9056 }
9057 
9058 /* Return nonzero if X is an expression that has one of two values depending on
9059    whether some other value is zero or nonzero.  In that case, we return the
9060    value that is being tested, *PTRUE is set to the value if the rtx being
9061    value that is being tested, *PTRUE is set to the value X has when the
9062    returned rtx is nonzero, and *PFALSE is set to the other alternative.
9063    If we return zero, we set *PTRUE and *PFALSE to X.  */
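/* For instance, for (if_then_else (eq R (const_int 0)) A B) we return R,
   with *PTRUE set to B and *PFALSE set to A, since the EQ condition is
   canonicalized into a test for R being nonzero.  */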
9064 
9065 static rtx
9066 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9067 {
9068   machine_mode mode = GET_MODE (x);
9069   enum rtx_code code = GET_CODE (x);
9070   rtx cond0, cond1, true0, true1, false0, false1;
9071   unsigned HOST_WIDE_INT nz;
9072 
9073   /* If we are comparing a value against zero, we are done.  */
9074   if ((code == NE || code == EQ)
9075       && XEXP (x, 1) == const0_rtx)
9076     {
9077       *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9078       *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9079       return XEXP (x, 0);
9080     }
9081 
9082   /* If this is a unary operation whose operand has one of two values, apply
9083      our opcode to compute those values.  */
9084   else if (UNARY_P (x)
9085 	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9086     {
9087       *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9088       *pfalse = simplify_gen_unary (code, mode, false0,
9089 				    GET_MODE (XEXP (x, 0)));
9090       return cond0;
9091     }
9092 
9093   /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9094      make can't possibly match and would suppress other optimizations.  */
9095   else if (code == COMPARE)
9096     ;
9097 
9098   /* If this is a binary operation, see if either side has only one of two
9099      values.  If either one does or if both do and they are conditional on
9100      the same value, compute the new true and false values.  */
9101   else if (BINARY_P (x))
9102     {
9103       rtx op0 = XEXP (x, 0);
9104       rtx op1 = XEXP (x, 1);
9105       cond0 = if_then_else_cond (op0, &true0, &false0);
9106       cond1 = if_then_else_cond (op1, &true1, &false1);
9107 
9108       if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9109 	  && (REG_P (op0) || REG_P (op1)))
9110 	{
9111 	  /* Try to enable a simplification by undoing work done by
9112 	     if_then_else_cond if it converted a REG into something more
9113 	     complex.  */
9114 	  if (REG_P (op0))
9115 	    {
9116 	      cond0 = 0;
9117 	      true0 = false0 = op0;
9118 	    }
9119 	  else
9120 	    {
9121 	      cond1 = 0;
9122 	      true1 = false1 = op1;
9123 	    }
9124 	}
9125 
9126       if ((cond0 != 0 || cond1 != 0)
9127 	  && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9128 	{
9129 	  /* If if_then_else_cond returned zero, then true/false are the
9130 	     same rtl.  We must copy one of them to prevent invalid rtl
9131 	     sharing.  */
9132 	  if (cond0 == 0)
9133 	    true0 = copy_rtx (true0);
9134 	  else if (cond1 == 0)
9135 	    true1 = copy_rtx (true1);
9136 
9137 	  if (COMPARISON_P (x))
9138 	    {
9139 	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9140 						true0, true1);
9141 	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9142 						 false0, false1);
9143 	    }
9144 	  else
9145 	    {
9146 	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
9147 	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
9148 	    }
9149 
9150 	  return cond0 ? cond0 : cond1;
9151 	}
9152 
9153       /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9154 	 operands is zero when the other is nonzero, and vice-versa,
9155 	 and STORE_FLAG_VALUE is 1 or -1.  */
9156 
9157       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9158 	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
9159 	      || code == UMAX)
9160 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9161 	{
9162 	  rtx op0 = XEXP (XEXP (x, 0), 1);
9163 	  rtx op1 = XEXP (XEXP (x, 1), 1);
9164 
9165 	  cond0 = XEXP (XEXP (x, 0), 0);
9166 	  cond1 = XEXP (XEXP (x, 1), 0);
9167 
9168 	  if (COMPARISON_P (cond0)
9169 	      && COMPARISON_P (cond1)
9170 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9171 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9172 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9173 		  || ((swap_condition (GET_CODE (cond0))
9174 		       == reversed_comparison_code (cond1, NULL))
9175 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9176 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9177 	      && ! side_effects_p (x))
9178 	    {
9179 	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9180 	      *pfalse = simplify_gen_binary (MULT, mode,
9181 					     (code == MINUS
9182 					      ? simplify_gen_unary (NEG, mode,
9183 								    op1, mode)
9184 					      : op1),
9185 					      const_true_rtx);
9186 	      return cond0;
9187 	    }
9188 	}
9189 
9190       /* Similarly for MULT, AND and UMIN, except that for these the result
9191 	 is always zero.  */
9192       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9193 	  && (code == MULT || code == AND || code == UMIN)
9194 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9195 	{
9196 	  cond0 = XEXP (XEXP (x, 0), 0);
9197 	  cond1 = XEXP (XEXP (x, 1), 0);
9198 
9199 	  if (COMPARISON_P (cond0)
9200 	      && COMPARISON_P (cond1)
9201 	      && SCALAR_INT_MODE_P (mode)
9202 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9203 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9204 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9205 		  || ((swap_condition (GET_CODE (cond0))
9206 		       == reversed_comparison_code (cond1, NULL))
9207 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9208 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9209 	      && ! side_effects_p (x))
9210 	    {
9211 	      *ptrue = *pfalse = const0_rtx;
9212 	      return cond0;
9213 	    }
9214 	}
9215     }
9216 
9217   else if (code == IF_THEN_ELSE)
9218     {
9219       /* If we have IF_THEN_ELSE already, extract the condition and
9220 	 canonicalize it if it is NE or EQ.  */
9221       cond0 = XEXP (x, 0);
9222       *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9223       if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9224 	return XEXP (cond0, 0);
9225       else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9226 	{
9227 	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9228 	  return XEXP (cond0, 0);
9229 	}
9230       else
9231 	return cond0;
9232     }
9233 
9234   /* If X is a SUBREG, we can narrow both the true and false values
9235      of the inner expression, if there is a condition.  */
9236   else if (code == SUBREG
9237 	   && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9238 					       &true0, &false0)))
9239     {
9240       true0 = simplify_gen_subreg (mode, true0,
9241 				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9242       false0 = simplify_gen_subreg (mode, false0,
9243 				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9244       if (true0 && false0)
9245 	{
9246 	  *ptrue = true0;
9247 	  *pfalse = false0;
9248 	  return cond0;
9249 	}
9250     }
9251 
9252   /* If X is a constant, this isn't special and will cause confusion
9253      if we treat it as such.  Likewise if it is equivalent to a constant.  */
9254   else if (CONSTANT_P (x)
9255 	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9256     ;
9257 
9258   /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9259      will be least confusing to the rest of the compiler.  */
9260   else if (mode == BImode)
9261     {
9262       *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9263       return x;
9264     }
9265 
9266   /* If X is known to be either 0 or -1, those are the true and
9267      false values when testing X.  */
9268   else if (x == constm1_rtx || x == const0_rtx
9269 	   || (mode != VOIDmode && mode != BLKmode
9270 	       && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
9271     {
9272       *ptrue = constm1_rtx, *pfalse = const0_rtx;
9273       return x;
9274     }
9275 
9276   /* Likewise for 0 or a single bit.  */
9277   else if (HWI_COMPUTABLE_MODE_P (mode)
9278 	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
9279     {
9280       *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9281       return x;
9282     }
9283 
9284   /* Otherwise fail; show no condition with true and false values the same.  */
9285   *ptrue = *pfalse = x;
9286   return 0;
9287 }
9288 
9289 /* Return the value of expression X given the fact that condition COND
9290    is known to be true when applied to REG as its first operand and VAL
9291    as its second.  X is known to not be shared and so can be modified in
9292    place.
9293 
9294    We only handle the simplest cases, and specifically those cases that
9295    arise with IF_THEN_ELSE expressions.  */
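/* For instance, if COND is GE, REG is R and VAL is (const_int 0), then
   (abs R) is known to equal R; under LT it is known to equal (neg R).  */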
9296 
9297 static rtx
9298 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9299 {
9300   enum rtx_code code = GET_CODE (x);
9301   const char *fmt;
9302   int i, j;
9303 
9304   if (side_effects_p (x))
9305     return x;
9306 
9307   /* If either operand of the condition is a floating point value,
9308      then we have to avoid collapsing an EQ comparison.  */
9309   if (cond == EQ
9310       && rtx_equal_p (x, reg)
9311       && ! FLOAT_MODE_P (GET_MODE (x))
9312       && ! FLOAT_MODE_P (GET_MODE (val)))
9313     return val;
9314 
9315   if (cond == UNEQ && rtx_equal_p (x, reg))
9316     return val;
9317 
9318   /* If X is (abs REG) and we know something about REG's relationship
9319      with zero, we may be able to simplify this.  */
9320 
9321   if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9322     switch (cond)
9323       {
9324       case GE:  case GT:  case EQ:
9325 	return XEXP (x, 0);
9326       case LT:  case LE:
9327 	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9328 				   XEXP (x, 0),
9329 				   GET_MODE (XEXP (x, 0)));
9330       default:
9331 	break;
9332       }
9333 
9334   /* The only other cases we handle are MIN, MAX, and comparisons if the
9335      operands are the same as REG and VAL.  */
9336 
9337   else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9338     {
9339       if (rtx_equal_p (XEXP (x, 0), val))
9340         {
9341 	  std::swap (val, reg);
9342 	  cond = swap_condition (cond);
9343         }
9344 
9345       if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9346 	{
9347 	  if (COMPARISON_P (x))
9348 	    {
9349 	      if (comparison_dominates_p (cond, code))
9350 		return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9351 
9352 	      code = reversed_comparison_code (x, NULL);
9353 	      if (code != UNKNOWN
9354 		  && comparison_dominates_p (cond, code))
9355 		return CONST0_RTX (GET_MODE (x));
9356 	      else
9357 		return x;
9358 	    }
9359 	  else if (code == SMAX || code == SMIN
9360 		   || code == UMIN || code == UMAX)
9361 	    {
9362 	      int unsignedp = (code == UMIN || code == UMAX);
9363 
9364 	      /* Do not reverse the condition when it is NE or EQ.
9365 		 This is because we cannot conclude anything about
9366 		 the value of 'SMAX (x, y)' when x is not equal to y,
9367 		 but we can when x equals y.  */
9368 	      if ((code == SMAX || code == UMAX)
9369 		  && ! (cond == EQ || cond == NE))
9370 		cond = reverse_condition (cond);
9371 
9372 	      switch (cond)
9373 		{
9374 		case GE:   case GT:
9375 		  return unsignedp ? x : XEXP (x, 1);
9376 		case LE:   case LT:
9377 		  return unsignedp ? x : XEXP (x, 0);
9378 		case GEU:  case GTU:
9379 		  return unsignedp ? XEXP (x, 1) : x;
9380 		case LEU:  case LTU:
9381 		  return unsignedp ? XEXP (x, 0) : x;
9382 		default:
9383 		  break;
9384 		}
9385 	    }
9386 	}
9387     }
9388   else if (code == SUBREG)
9389     {
9390       machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9391       rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9392 
9393       if (SUBREG_REG (x) != r)
9394 	{
9395 	  /* We must simplify subreg here, before we lose track of the
9396 	     original inner_mode.  */
9397 	  new_rtx = simplify_subreg (GET_MODE (x), r,
9398 				     inner_mode, SUBREG_BYTE (x));
9399 	  if (new_rtx)
9400 	    return new_rtx;
9401 	  else
9402 	    SUBST (SUBREG_REG (x), r);
9403 	}
9404 
9405       return x;
9406     }
9407   /* We don't have to handle SIGN_EXTEND here, because even in the
9408      case of replacing something with a modeless CONST_INT, a
9409      CONST_INT is already (supposed to be) a valid sign extension for
9410      its narrower mode, which implies it's already properly
9411      sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
9412      story is different.  */
9413   else if (code == ZERO_EXTEND)
9414     {
9415       machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9416       rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9417 
9418       if (XEXP (x, 0) != r)
9419 	{
9420 	  /* We must simplify the zero_extend here, before we lose
9421 	     track of the original inner_mode.  */
9422 	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9423 					      r, inner_mode);
9424 	  if (new_rtx)
9425 	    return new_rtx;
9426 	  else
9427 	    SUBST (XEXP (x, 0), r);
9428 	}
9429 
9430       return x;
9431     }
9432 
9433   fmt = GET_RTX_FORMAT (code);
9434   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9435     {
9436       if (fmt[i] == 'e')
9437 	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9438       else if (fmt[i] == 'E')
9439 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9440 	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9441 						cond, reg, val));
9442     }
9443 
9444   return x;
9445 }
9446 
9447 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9448    assignment as a field assignment.  */
9449 
9450 static int
9451 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9452 {
9453   if (widen_x && GET_MODE (x) != GET_MODE (y))
9454     {
9455       if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
9456 	return 0;
9457       if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9458 	return 0;
9459       /* For big endian, adjust the memory offset.  */
9460       if (BYTES_BIG_ENDIAN)
9461 	x = adjust_address_nv (x, GET_MODE (y),
9462 			       -subreg_lowpart_offset (GET_MODE (x),
9463 						       GET_MODE (y)));
9464       else
9465 	x = adjust_address_nv (x, GET_MODE (y), 0);
9466     }
9467 
9468   if (x == y || rtx_equal_p (x, y))
9469     return 1;
9470 
9471   if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9472     return 0;
9473 
9474   /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9475      Note that all SUBREGs of MEM are paradoxical; otherwise they
9476      would have been rewritten.  */
9477   if (MEM_P (x) && GET_CODE (y) == SUBREG
9478       && MEM_P (SUBREG_REG (y))
9479       && rtx_equal_p (SUBREG_REG (y),
9480 		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9481     return 1;
9482 
9483   if (MEM_P (y) && GET_CODE (x) == SUBREG
9484       && MEM_P (SUBREG_REG (x))
9485       && rtx_equal_p (SUBREG_REG (x),
9486 		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9487     return 1;
9488 
9489   /* We used to see if get_last_value of X and Y were the same but that's
9490      not correct.  In one direction, we'll cause the assignment to have
9491      the wrong destination and in the other, we'll import a register into
9492      this insn that might already have been dead.  So fail if none of the
9493      above cases are true.  */
9494   return 0;
9495 }
9496 
9497 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9498    Return that assignment if so.
9499 
9500    We only handle the most common cases.  */
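/* For example, (set D (ior (ashift (const_int 1) POS) D)) sets a single
   bit and becomes a store of (const_int 1) into a one-bit field of D at
   position POS (typically a ZERO_EXTRACT), if make_extraction succeeds.  */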
9501 
9502 static rtx
9503 make_field_assignment (rtx x)
9504 {
9505   rtx dest = SET_DEST (x);
9506   rtx src = SET_SRC (x);
9507   rtx assign;
9508   rtx rhs, lhs;
9509   HOST_WIDE_INT c1;
9510   HOST_WIDE_INT pos;
9511   unsigned HOST_WIDE_INT len;
9512   rtx other;
9513   machine_mode mode;
9514 
9515   /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9516      a clear of a one-bit field.  We will have changed it to
9517      (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
9518      for a SUBREG.  */
9519 
9520   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9521       && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9522       && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9523       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9524     {
9525       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9526 				1, 1, 1, 0);
9527       if (assign != 0)
9528 	return gen_rtx_SET (assign, const0_rtx);
9529       return x;
9530     }
9531 
9532   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9533       && subreg_lowpart_p (XEXP (src, 0))
9534       && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
9535 	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
9536       && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9537       && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9538       && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9539       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9540     {
9541       assign = make_extraction (VOIDmode, dest, 0,
9542 				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9543 				1, 1, 1, 0);
9544       if (assign != 0)
9545 	return gen_rtx_SET (assign, const0_rtx);
9546       return x;
9547     }
9548 
9549   /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9550      one-bit field.  */
9551   if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9552       && XEXP (XEXP (src, 0), 0) == const1_rtx
9553       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9554     {
9555       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9556 				1, 1, 1, 0);
9557       if (assign != 0)
9558 	return gen_rtx_SET (assign, const1_rtx);
9559       return x;
9560     }
9561 
9562   /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9563      SRC is an AND with all bits of that field set, then we can discard
9564      the AND.  */
9565   if (GET_CODE (dest) == ZERO_EXTRACT
9566       && CONST_INT_P (XEXP (dest, 1))
9567       && GET_CODE (src) == AND
9568       && CONST_INT_P (XEXP (src, 1)))
9569     {
9570       HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9571       unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9572       unsigned HOST_WIDE_INT ze_mask;
9573 
9574       if (width >= HOST_BITS_PER_WIDE_INT)
9575 	ze_mask = -1;
9576       else
9577 	ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9578 
9579       /* Complete overlap.  We can remove the source AND.  */
9580       if ((and_mask & ze_mask) == ze_mask)
9581 	return gen_rtx_SET (dest, XEXP (src, 0));
9582 
9583       /* Partial overlap.  We can reduce the source AND.  */
9584       if ((and_mask & ze_mask) != and_mask)
9585 	{
9586 	  mode = GET_MODE (src);
9587 	  src = gen_rtx_AND (mode, XEXP (src, 0),
9588 			     gen_int_mode (and_mask & ze_mask, mode));
9589 	  return gen_rtx_SET (dest, src);
9590 	}
9591     }
9592 
9593   /* The other case we handle is assignments into a constant-position
9594      field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
9595      a mask that has all one bits except for a group of zero bits and
9596      OTHER is known to have zeros where C1 has ones, this is such an
9597      assignment.  Compute the position and length from C1.  Shift OTHER
9598      to the appropriate position, force it to the required mode, and
9599      make the extraction.  Check for the AND in both operands.  */
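  /* E.g. in a 32-bit mode, C1 = 0xffff00ff leaves a hole at bits 8..15;
     if OTHER can be nonzero only in those bits, the result is a store of
     (lshiftrt OTHER 8), masked to 8 bits, into an 8-bit field of DEST at
     bit 8.  */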
9600 
9601   /* One or more SUBREGs might obscure the constant-position field
9602      assignment.  The first one we are likely to encounter is an outer
9603      narrowing SUBREG, which we can just strip for the purposes of
9604      identifying the constant-field assignment.  */
9605   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
9606     src = SUBREG_REG (src);
9607 
9608   if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9609     return x;
9610 
9611   rhs = expand_compound_operation (XEXP (src, 0));
9612   lhs = expand_compound_operation (XEXP (src, 1));
9613 
9614   if (GET_CODE (rhs) == AND
9615       && CONST_INT_P (XEXP (rhs, 1))
9616       && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9617     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9618   /* The second SUBREG that might get in the way is a paradoxical
9619      SUBREG around the first operand of the AND.  We want to
9620      pretend the operand is as wide as the destination here.   We
9621      do this by adjusting the MEM to wider mode for the sole
9622      purpose of the call to rtx_equal_for_field_assignment_p.   Also
9623      note this trick only works for MEMs.  */
9624   else if (GET_CODE (rhs) == AND
9625 	   && paradoxical_subreg_p (XEXP (rhs, 0))
9626 	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9627 	   && CONST_INT_P (XEXP (rhs, 1))
9628 	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9629 						dest, true))
9630     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9631   else if (GET_CODE (lhs) == AND
9632 	   && CONST_INT_P (XEXP (lhs, 1))
9633 	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9634     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9635   /* The second SUBREG that might get in the way is a paradoxical
9636      SUBREG around the first operand of the AND.  We want to
9637      pretend the operand is as wide as the destination here.   We
9638      do this by adjusting the MEM to wider mode for the sole
9639      purpose of the call to rtx_equal_for_field_assignment_p.   Also
9640      note this trick only works for MEMs.  */
9641   else if (GET_CODE (lhs) == AND
9642 	   && paradoxical_subreg_p (XEXP (lhs, 0))
9643 	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9644 	   && CONST_INT_P (XEXP (lhs, 1))
9645 	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9646 						dest, true))
9647     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9648   else
9649     return x;
9650 
9651   pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9652   if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9653       || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9654       || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9655     return x;
9656 
9657   assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9658   if (assign == 0)
9659     return x;
9660 
9661   /* The mode to use for the source is the mode of the assignment, or of
9662      what is inside a possible STRICT_LOW_PART.  */
9663   mode = (GET_CODE (assign) == STRICT_LOW_PART
9664 	  ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9665 
9666   /* Shift OTHER right POS places and make it the source, restricting it
9667      to the proper length and mode.  */
9668 
9669   src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9670 						     GET_MODE (src),
9671 						     other, pos),
9672 			       dest);
9673   src = force_to_mode (src, mode,
9674 		       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9675 		       ? HOST_WIDE_INT_M1U
9676 		       : (HOST_WIDE_INT_1U << len) - 1,
9677 		       0);
9678 
9679   /* If SRC is masked by an AND that does not make a difference in
9680      the value being stored, strip it.  */
9681   if (GET_CODE (assign) == ZERO_EXTRACT
9682       && CONST_INT_P (XEXP (assign, 1))
9683       && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9684       && GET_CODE (src) == AND
9685       && CONST_INT_P (XEXP (src, 1))
9686       && UINTVAL (XEXP (src, 1))
9687 	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9688     src = XEXP (src, 0);
9689 
9690   return gen_rtx_SET (assign, src);
9691 }
9692 
9693 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9694    if so.  */
9695 
9696 static rtx
9697 apply_distributive_law (rtx x)
9698 {
9699   enum rtx_code code = GET_CODE (x);
9700   enum rtx_code inner_code;
9701   rtx lhs, rhs, other;
9702   rtx tem;
9703 
9704   /* Distributivity is not true for floating point as it can change the
9705      value.  So we don't do it unless -funsafe-math-optimizations.  */
9706   if (FLOAT_MODE_P (GET_MODE (x))
9707       && ! flag_unsafe_math_optimizations)
9708     return x;
9709 
9710   /* The outer operation can only be one of the following:  */
9711   if (code != IOR && code != AND && code != XOR
9712       && code != PLUS && code != MINUS)
9713     return x;
9714 
9715   lhs = XEXP (x, 0);
9716   rhs = XEXP (x, 1);
9717 
9718   /* If either operand is a primitive we can't do anything, so get out
9719      fast.  */
9720   if (OBJECT_P (lhs) || OBJECT_P (rhs))
9721     return x;
9722 
9723   lhs = expand_compound_operation (lhs);
9724   rhs = expand_compound_operation (rhs);
9725   inner_code = GET_CODE (lhs);
9726   if (inner_code != GET_CODE (rhs))
9727     return x;
9728 
9729   /* See if the inner and outer operations distribute.  */
9730   switch (inner_code)
9731     {
9732     case LSHIFTRT:
9733     case ASHIFTRT:
9734     case AND:
9735     case IOR:
9736       /* These all distribute except over PLUS.  */
9737       if (code == PLUS || code == MINUS)
9738 	return x;
9739       break;
9740 
9741     case MULT:
9742       if (code != PLUS && code != MINUS)
9743 	return x;
9744       break;
9745 
9746     case ASHIFT:
9747       /* This is also a multiply, so it distributes over everything.  */
9748       break;
9749 
9750     /* This used to handle SUBREG, but this turned out to be counter-
9751        productive, since (subreg (op ...)) usually is not handled by
9752        insn patterns, and this "optimization" therefore transformed
9753        recognizable patterns into unrecognizable ones.  Therefore the
9754        SUBREG case was removed from here.
9755 
9756        It is possible that distributing SUBREG over arithmetic operations
9757        leads to an intermediate result that can then be optimized further,
9758        e.g. by moving the outer SUBREG to the other side of a SET as done
9759        in simplify_set.  This seems to have been the original intent of
9760        handling SUBREGs here.
9761 
9762        However, with current GCC this does not appear to actually happen,
9763        at least on major platforms.  If some case is found where removing
9764        the SUBREG case here prevents follow-on optimizations, distributing
9765        SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */
9766 
9767     default:
9768       return x;
9769     }
9770 
9771   /* Set LHS and RHS to the inner operands (A and B in the example
9772      above) and set OTHER to the common operand (C in the example).
9773      There is only one way to do this unless the inner operation is
9774      commutative.  */
9775   if (COMMUTATIVE_ARITH_P (lhs)
9776       && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9777     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9778   else if (COMMUTATIVE_ARITH_P (lhs)
9779 	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9780     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9781   else if (COMMUTATIVE_ARITH_P (lhs)
9782 	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9783     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9784   else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9785     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9786   else
9787     return x;
9788 
9789   /* Form the new inner operation, seeing if it simplifies first.  */
9790   tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9791 
9792   /* There is one exception to the general way of distributing:
9793      (a | c) ^ (b | c) -> (a ^ b) & ~c  */
9794   if (code == XOR && inner_code == IOR)
9795     {
9796       inner_code = AND;
9797       other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9798     }
9799 
9800   /* We may be able to continue distributing the result, so call
9801      ourselves recursively on the inner operation before forming the
9802      outer operation, which we return.  */
9803   return simplify_gen_binary (inner_code, GET_MODE (x),
9804 			      apply_distributive_law (tem), other);
9805 }
9806 
9807 /* See if X is of the form (* (+ A B) C), and if so convert to
9808    (+ (* A C) (* B C)) and try to simplify.
9809 
9810    Most of the time, this results in no change.  However, if some of
9811    the operands are the same or inverses of each other, simplifications
9812    will result.
9813 
9814    For example, (and (ior A B) (not B)) can occur as the result of
9815    expanding a bit field assignment.  When we apply the distributive
9816    law to this, we get (ior (and A (not B)) (and B (not B))),
9817    which then simplifies to (and A (not B)).
9818 
9819    Note that no checks happen on the validity of applying the inverse
9820    distributive law.  Checking here would be pointless since we can do it
9821    in the few places where this routine is called.
9822 
9823    N is the index of the term that is decomposed (the arithmetic operation,
9824    i.e. (+ A B) in the first example above).  !N is the index of the term that
9825    is distributed, i.e. of C in the first example above.  */
9826 static rtx
9827 distribute_and_simplify_rtx (rtx x, int n)
9828 {
9829   machine_mode mode;
9830   enum rtx_code outer_code, inner_code;
9831   rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9832 
9833   /* Distributivity is not true for floating point as it can change the
9834      value.  So we don't do it unless -funsafe-math-optimizations.  */
9835   if (FLOAT_MODE_P (GET_MODE (x))
9836       && ! flag_unsafe_math_optimizations)
9837     return NULL_RTX;
9838 
9839   decomposed = XEXP (x, n);
9840   if (!ARITHMETIC_P (decomposed))
9841     return NULL_RTX;
9842 
9843   mode = GET_MODE (x);
9844   outer_code = GET_CODE (x);
9845   distributed = XEXP (x, !n);
9846 
9847   inner_code = GET_CODE (decomposed);
9848   inner_op0 = XEXP (decomposed, 0);
9849   inner_op1 = XEXP (decomposed, 1);
9850 
9851   /* Special case (and (xor B C) (not A)), which is equivalent to
9852      (xor (ior A B) (ior A C))  */
9853   if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9854     {
9855       distributed = XEXP (distributed, 0);
9856       outer_code = IOR;
9857     }
9858 
9859   if (n == 0)
9860     {
9861       /* Distribute the second term.  */
9862       new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9863       new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9864     }
9865   else
9866     {
9867       /* Distribute the first term.  */
9868       new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9869       new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9870     }
9871 
9872   tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9873 						     new_op0, new_op1));
9874   if (GET_CODE (tmp) != outer_code
9875       && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9876 	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
9877     return tmp;
9878 
9879   return NULL_RTX;
9880 }
9881 
9882 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9883    in MODE.  Return an equivalent form, if different from (and VAROP
9884    (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
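/* For instance, if VAROP is (neg Y) where Y is known to be 0 or 1 and
   CONSTOP is 4, the result is (ashift Y 2): the NEG yields 0 or all ones,
   and masking that with 4 leaves exactly Y << 2.  */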
9885 
9886 static rtx
9887 simplify_and_const_int_1 (machine_mode mode, rtx varop,
9888 			  unsigned HOST_WIDE_INT constop)
9889 {
9890   unsigned HOST_WIDE_INT nonzero;
9891   unsigned HOST_WIDE_INT orig_constop;
9892   rtx orig_varop;
9893   int i;
9894 
9895   orig_varop = varop;
9896   orig_constop = constop;
9897   if (GET_CODE (varop) == CLOBBER)
9898     return NULL_RTX;
9899 
9900   /* Simplify VAROP knowing that we will be only looking at some of the
9901      bits in it.
9902 
9903      Note by passing in CONSTOP, we guarantee that the bits not set in
9904      CONSTOP are not significant and will never be examined.  We must
9905      ensure that is the case by explicitly masking out those bits
9906      before returning.  */
9907   varop = force_to_mode (varop, mode, constop, 0);
9908 
9909   /* If VAROP is a CLOBBER, we will fail so return it.  */
9910   if (GET_CODE (varop) == CLOBBER)
9911     return varop;
9912 
9913   /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9914      to VAROP and return the new constant.  */
9915   if (CONST_INT_P (varop))
9916     return gen_int_mode (INTVAL (varop) & constop, mode);
9917 
9918   /* See what bits may be nonzero in VAROP.  Unlike the general case of
9919      a call to nonzero_bits, here we don't care about bits outside
9920      MODE.  */
9921 
9922   nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9923 
9924   /* Turn off all bits in the constant that are known to already be zero.
9925      Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9926      which is tested below.  */
9927 
9928   constop &= nonzero;
9929 
9930   /* If we don't have any bits left, return zero.  */
9931   if (constop == 0)
9932     return const0_rtx;
9933 
9934   /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9935      a power of two, we can replace this with an ASHIFT.  */
9936   if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9937       && (i = exact_log2 (constop)) >= 0)
9938     return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9939 
9940   /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9941      or XOR, then try to apply the distributive law.  This may eliminate
9942      operations if either branch can be simplified because of the AND.
9943      It may also make some cases more complex, but those cases probably
9944      won't match a pattern either with or without this.  */
9945 
9946   if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9947     return
9948       gen_lowpart
9949 	(mode,
9950 	 apply_distributive_law
9951 	 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9952 			       simplify_and_const_int (NULL_RTX,
9953 						       GET_MODE (varop),
9954 						       XEXP (varop, 0),
9955 						       constop),
9956 			       simplify_and_const_int (NULL_RTX,
9957 						       GET_MODE (varop),
9958 						       XEXP (varop, 1),
9959 						       constop))));
9960 
9961   /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9962      the AND and see if one of the operands simplifies to zero.  If so, we
9963      may eliminate it.  */
9964 
9965   if (GET_CODE (varop) == PLUS
9966       && pow2p_hwi (constop + 1))
9967     {
9968       rtx o0, o1;
9969 
9970       o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9971       o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9972       if (o0 == const0_rtx)
9973 	return o1;
9974       if (o1 == const0_rtx)
9975 	return o0;
9976     }
9977 
9978   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
9979   varop = gen_lowpart (mode, varop);
9980   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9981     return NULL_RTX;
9982 
9983   /* If we are only masking insignificant bits, return VAROP.  */
9984   if (constop == nonzero)
9985     return varop;
9986 
9987   if (varop == orig_varop && constop == orig_constop)
9988     return NULL_RTX;
9989 
9990   /* Otherwise, return an AND.  */
9991   return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9992 }
9993 
9994 
9995 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9996    in MODE.
9997 
9998    Return an equivalent form, if different from X.  Otherwise, return X.  If
9999    X is zero, we are to always construct the equivalent form.  */
10000 
10001 static rtx
10002 simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
10003 			unsigned HOST_WIDE_INT constop)
10004 {
10005   rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10006   if (tem)
10007     return tem;
10008 
10009   if (!x)
10010     x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10011 			     gen_int_mode (constop, mode));
10012   if (GET_MODE (x) != mode)
10013     x = gen_lowpart (mode, x);
10014   return x;
10015 }
10016 
10017 /* Given a REG, X, compute which bits in X can be nonzero.
10018    We don't care about bits outside of those defined in MODE.
10019 
10020    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
10021    a shift, AND, or zero_extract, we can do better.  */
10022 
10023 static rtx
10024 reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
10025 			      const_rtx known_x ATTRIBUTE_UNUSED,
10026 			      machine_mode known_mode ATTRIBUTE_UNUSED,
10027 			      unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
10028 			      unsigned HOST_WIDE_INT *nonzero)
10029 {
10030   rtx tem;
10031   reg_stat_type *rsp;
10032 
10033   /* If X is a register whose nonzero bits value is current, use it.
10034      Otherwise, if X is a register whose value we can find, use that
10035      value.  Otherwise, use the previously-computed global nonzero bits
10036      for this register.  */
10037 
10038   rsp = &reg_stat[REGNO (x)];
10039   if (rsp->last_set_value != 0
10040       && (rsp->last_set_mode == mode
10041 	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10042 	      && GET_MODE_CLASS (mode) == MODE_INT))
10043       && ((rsp->last_set_label >= label_tick_ebb_start
10044 	   && rsp->last_set_label < label_tick)
10045 	  || (rsp->last_set_label == label_tick
10046               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10047 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10048 	      && REGNO (x) < reg_n_sets_max
10049 	      && REG_N_SETS (REGNO (x)) == 1
10050 	      && !REGNO_REG_SET_P
10051 		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10052 		   REGNO (x)))))
10053     {
10054       /* Note that, even if the precision of last_set_mode is lower than that
10055 	 of mode, record_value_for_reg invoked nonzero_bits on the register
10056 	 with nonzero_bits_mode (because last_set_mode is necessarily integral
10057 	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10058 	 are all valid, hence in mode too since nonzero_bits_mode is defined
10059 	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
10060       *nonzero &= rsp->last_set_nonzero_bits;
10061       return NULL;
10062     }
10063 
10064   tem = get_last_value (x);
10065   if (tem)
10066     {
10067       if (SHORT_IMMEDIATES_SIGN_EXTEND)
10068 	tem = sign_extend_short_imm (tem, GET_MODE (x),
10069 				     GET_MODE_PRECISION (mode));
10070 
10071       return tem;
10072     }
10073 
10074   if (nonzero_sign_valid && rsp->nonzero_bits)
10075     {
10076       unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10077 
10078       if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
10079 	/* We don't know anything about the upper bits.  */
10080 	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
10081 
10082       *nonzero &= mask;
10083     }
10084 
10085   return NULL;
10086 }
10087 
10088 /* Return the number of bits at the high-order end of X that are known to
10089    be equal to the sign bit.  X will be used in mode MODE; if MODE is
10090    VOIDmode, X will be used in its own mode.  The returned value will always
10091    be between 1 and the number of bits in MODE.  */
10092 
10093 static rtx
10094 reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
10095 				     const_rtx known_x ATTRIBUTE_UNUSED,
10096 				     machine_mode known_mode
10097 				     ATTRIBUTE_UNUSED,
10098 				     unsigned int known_ret ATTRIBUTE_UNUSED,
10099 				     unsigned int *result)
10100 {
10101   rtx tem;
10102   reg_stat_type *rsp;
10103 
10104   rsp = &reg_stat[REGNO (x)];
10105   if (rsp->last_set_value != 0
10106       && rsp->last_set_mode == mode
10107       && ((rsp->last_set_label >= label_tick_ebb_start
10108 	   && rsp->last_set_label < label_tick)
10109 	  || (rsp->last_set_label == label_tick
10110               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10111 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10112 	      && REGNO (x) < reg_n_sets_max
10113 	      && REG_N_SETS (REGNO (x)) == 1
10114 	      && !REGNO_REG_SET_P
10115 		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10116 		   REGNO (x)))))
10117     {
10118       *result = rsp->last_set_sign_bit_copies;
10119       return NULL;
10120     }
10121 
10122   tem = get_last_value (x);
10123   if (tem != 0)
10124     return tem;
10125 
10126   if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10127       && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
10128     *result = rsp->sign_bit_copies;
10129 
10130   return NULL;
10131 }
10132 
10133 /* Return the number of "extended" bits there are in X, when interpreted
10134    as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
10135    unsigned quantities, this is the number of high-order zero bits.
10136    For signed quantities, this is the number of copies of the sign bit
10137    minus 1.  In both cases, this function returns the number of "spare"
10138    bits.  For example, if two quantities for which this function returns
10139    at least 1 are added, the addition is known not to overflow.
10140 
10141    This function will always return 0 unless called during combine, which
10142    implies that it must be called from a define_split.  */
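/* For example, for a 32-bit unsigned quantity whose nonzero bits are
   0xff, this returns 32 - 1 - 7 == 24 spare bits.  */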
10143 
10144 unsigned int
10145 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10146 {
10147   if (nonzero_sign_valid == 0)
10148     return 0;
10149 
10150   return (unsignedp
10151 	  ? (HWI_COMPUTABLE_MODE_P (mode)
10152 	     ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
10153 			       - floor_log2 (nonzero_bits (x, mode)))
10154 	     : 0)
10155 	  : num_sign_bit_copies (x, mode) - 1);
10156 }
10157 
10158 /* This function is called from `simplify_shift_const' to merge two
10159    outer operations.  Specifically, we have already found that we need
10160    to perform operation *POP0 with constant *PCONST0 at the outermost
10161    position.  We would now like to also perform OP1 with constant CONST1
10162    (with *POP0 being done last).
10163 
10164    Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10165    the resulting operation.  *PCOMP_P is set to 1 if we would need to
10166    complement the innermost operand, otherwise it is unchanged.
10167 
10168    MODE is the mode in which the operation will be done.  No bits outside
10169    the width of this mode matter.  It is assumed that the width of this mode
10170    is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10171 
10172    If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
10173    PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
10174    proper result is simply *PCONST0.
10175 
10176    If the resulting operation cannot be expressed as one operation, we
10177    return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
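/* For instance, an outer IOR with constant 0xf0 merges with another IOR
   with constant 0x0f into a single IOR with constant 0xff, whereas an
   outer XOR cannot be merged with a PLUS at all.  */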
10178 
10179 static int
10180 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10181 {
10182   enum rtx_code op0 = *pop0;
10183   HOST_WIDE_INT const0 = *pconst0;
10184 
10185   const0 &= GET_MODE_MASK (mode);
10186   const1 &= GET_MODE_MASK (mode);
10187 
10188   /* If OP0 is an AND, clear unimportant bits in CONST1.  */
10189   if (op0 == AND)
10190     const1 &= const0;
10191 
10192   /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same or
10193      if OP0 is SET.  */
10194 
10195   if (op1 == UNKNOWN || op0 == SET)
10196     return 1;
10197 
10198   else if (op0 == UNKNOWN)
10199     op0 = op1, const0 = const1;
10200 
10201   else if (op0 == op1)
10202     {
10203       switch (op0)
10204 	{
10205 	case AND:
10206 	  const0 &= const1;
10207 	  break;
10208 	case IOR:
10209 	  const0 |= const1;
10210 	  break;
10211 	case XOR:
10212 	  const0 ^= const1;
10213 	  break;
10214 	case PLUS:
10215 	  const0 += const1;
10216 	  break;
10217 	case NEG:
10218 	  op0 = UNKNOWN;
10219 	  break;
10220 	default:
10221 	  break;
10222 	}
10223     }
10224 
10225   /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
10226   else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10227     return 0;
10228 
10229   /* If the two constants aren't the same, we can't do anything.  The
10230      remaining six cases can all be done.  */
10231   else if (const0 != const1)
10232     return 0;
10233 
10234   else
10235     switch (op0)
10236       {
10237       case IOR:
10238 	if (op1 == AND)
10239 	  /* (a & b) | b == b */
10240 	  op0 = SET;
10241 	else /* op1 == XOR */
10242 	  /* (a ^ b) | b == a | b */
10243 	  {;}
10244 	break;
10245 
10246       case XOR:
10247 	if (op1 == AND)
10248 	  /* (a & b) ^ b == (~a) & b */
10249 	  op0 = AND, *pcomp_p = 1;
10250 	else /* op1 == IOR */
10251 	  /* (a | b) ^ b == a & ~b */
10252 	  op0 = AND, const0 = ~const0;
10253 	break;
10254 
10255       case AND:
10256 	if (op1 == IOR)
10257 	  /* (a | b) & b == b */
10258 	  op0 = SET;
10259 	else /* op1 == XOR */
10260 	  /* (a ^ b) & b == (~a) & b */
10261 	  *pcomp_p = 1;
10262 	break;
10263       default:
10264 	break;
10265       }
10266 
10267   /* Check for NO-OP cases.  */
10268   const0 &= GET_MODE_MASK (mode);
10269   if (const0 == 0
10270       && (op0 == IOR || op0 == XOR || op0 == PLUS))
10271     op0 = UNKNOWN;
10272   else if (const0 == 0 && op0 == AND)
10273     op0 = SET;
10274   else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10275 	   && op0 == AND)
10276     op0 = UNKNOWN;
10277 
10278   *pop0 = op0;
10279 
10280   /* ??? Slightly redundant with the above mask, but not entirely.
10281      Moving this above means we'd have to sign-extend the mode mask
10282      for the final test.  */
10283   if (op0 != UNKNOWN && op0 != NEG)
10284     *pconst0 = trunc_int_for_mode (const0, mode);
10285 
10286   return 1;
10287 }
10288 
10289 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10290    the shift in.  The original shift operation CODE is performed on OP in
10291    ORIG_MODE.  Return the wider mode MODE if we can perform the operation
10292    in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
10293    result of the shift is subject to operation OUTER_CODE with operand
10294    OUTER_CONST.  */
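/* E.g. an LSHIFTRT originally in QImode can be widened to SImode when the
   operand's nonzero bits all lie within the QImode mask, since the bits
   shifted in from above are then known to be zero.  */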
10295 
10296 static machine_mode
10297 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10298 		      machine_mode orig_mode, machine_mode mode,
10299 		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10300 {
10301   if (orig_mode == mode)
10302     return mode;
10303   gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10304 
10305   /* In general we can't perform in wider mode for right shift and rotate.  */
10306   switch (code)
10307     {
10308     case ASHIFTRT:
10309       /* We can still widen if the bits brought in from the left are identical
10310 	 to the sign bit of ORIG_MODE.  */
10311       if (num_sign_bit_copies (op, mode)
10312 	  > (unsigned) (GET_MODE_PRECISION (mode)
10313 			- GET_MODE_PRECISION (orig_mode)))
10314 	return mode;
10315       return orig_mode;
10316 
10317     case LSHIFTRT:
10318       /* Similarly here but with zero bits.  */
10319       if (HWI_COMPUTABLE_MODE_P (mode)
10320 	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10321 	return mode;
10322 
10323       /* We can also widen if the bits brought in will be masked off.  This
10324 	 operation is performed in ORIG_MODE.  */
10325       if (outer_code == AND)
10326 	{
10327 	  int care_bits = low_bitmask_len (orig_mode, outer_const);
10328 
10329 	  if (care_bits >= 0
10330 	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10331 	    return mode;
10332 	}
10333       /* fall through */
10334 
10335     case ROTATE:
10336       return orig_mode;
10337 
10338     case ROTATERT:
10339       gcc_unreachable ();
10340 
10341     default:
10342       return mode;
10343     }
10344 }
10345 
10346 /* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
10347    of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
10348    if we cannot simplify it.  Otherwise, return a simplified value.
10349 
10350    The shift is normally computed in the widest mode we find in VAROP, as
10351    long as it isn't a different number of words than RESULT_MODE.  Exceptions
10352    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
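/* E.g. an out-of-range count such as 34 for a 32-bit ASHIFTRT is clamped
   to 31, a ROTATE count is reduced modulo the precision, and other shifts
   by the precision or more leave a zero VAROP (subject to any outer
   operation).  */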
10353 
10354 static rtx
10355 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10356 			rtx varop, int orig_count)
10357 {
10358   enum rtx_code orig_code = code;
10359   rtx orig_varop = varop;
10360   int count;
10361   machine_mode mode = result_mode;
10362   machine_mode shift_mode, tmode;
10363   unsigned int mode_words
10364     = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10365   /* We form (outer_op (code varop count) (outer_const)).  */
10366   enum rtx_code outer_op = UNKNOWN;
10367   HOST_WIDE_INT outer_const = 0;
10368   int complement_p = 0;
10369   rtx new_rtx, x;
10370 
10371 	  /* Make sure to truncate the "natural" shift on the way in.  We don't
10372      want to do this inside the loop as it makes it more difficult to
10373      combine shifts.  */
10374   if (SHIFT_COUNT_TRUNCATED)
10375     orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10376 
10377   /* If we were given an invalid count, don't do anything except exactly
10378      what was requested.  */
10379 
10380   if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10381     return NULL_RTX;
10382 
10383   count = orig_count;
10384 
10385   /* Unless one of the branches of the `if' in this loop does a `continue',
10386      we will `break' the loop after the `if'.  */
10387 
10388   while (count != 0)
10389     {
10390       /* If we have an operand of (clobber (const_int 0)), fail.  */
10391       if (GET_CODE (varop) == CLOBBER)
10392 	return NULL_RTX;
10393 
10394       /* Convert ROTATERT to ROTATE.  */
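      /* For example, in SImode a ROTATERT by 8 becomes a ROTATE by 24,
	 since rotating right by N is the same as rotating left by
	 bitsize - N.  */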
10395       if (code == ROTATERT)
10396 	{
10397 	  unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10398 	  code = ROTATE;
10399 	  count = bitsize - count;
10400 	}
10401 
10402       shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
10403 					 mode, outer_op, outer_const);
10404       machine_mode shift_unit_mode = GET_MODE_INNER (shift_mode);
10405 
10406       /* Handle cases where the count is greater than the size of the mode
10407 	 minus 1.  For ASHIFT, use the size minus one as the count (this can
10408 	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
10409 	 take the count modulo the size.  For other shifts, the result is
10410 	 zero.
10411 
10412 	 Since these shifts are being produced by the compiler by combining
10413 	 multiple operations, each of which are defined, we know what the
10414 	 result is supposed to be.  */
10415 
10416       if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10417 	{
10418 	  if (code == ASHIFTRT)
10419 	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10420 	  else if (code == ROTATE || code == ROTATERT)
10421 	    count %= GET_MODE_PRECISION (shift_unit_mode);
10422 	  else
10423 	    {
10424 	      /* We can't simply return zero because there may be an
10425 		 outer op.  */
10426 	      varop = const0_rtx;
10427 	      count = 0;
10428 	      break;
10429 	    }
10430 	}
10431 
10432       /* If we discovered we had to complement VAROP, leave.  Making a NOT
10433 	 here would cause an infinite loop.  */
10434       if (complement_p)
10435 	break;
10436 
10437       if (shift_mode == shift_unit_mode)
10438 	{
10439 	  /* An arithmetic right shift of a quantity known to be -1 or 0
10440 	     is a no-op.  */
10441 	  if (code == ASHIFTRT
10442 	      && (num_sign_bit_copies (varop, shift_unit_mode)
10443 		  == GET_MODE_PRECISION (shift_unit_mode)))
10444 	    {
10445 	      count = 0;
10446 	      break;
10447 	    }
10448 
10449 	  /* If we are doing an arithmetic right shift and discarding all but
10450 	     the sign bit copies, this is equivalent to doing a shift by the
10451 	     bitsize minus one.  Convert it into that shift because it will
10452 	     often allow other simplifications.  */
10453 
10454 	  if (code == ASHIFTRT
10455 	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
10456 		  >= GET_MODE_PRECISION (shift_unit_mode)))
10457 	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10458 
10459 	  /* We simplify the tests below and elsewhere by converting
10460 	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10461 	     `make_compound_operation' will convert it to an ASHIFTRT for
10462 	     those machines (such as VAX) that don't have an LSHIFTRT.  */
10463 	  if (code == ASHIFTRT
10464 	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10465 	      && val_signbit_known_clear_p (shift_unit_mode,
10466 					    nonzero_bits (varop,
10467 							  shift_unit_mode)))
10468 	    code = LSHIFTRT;
10469 
10470 	  if (((code == LSHIFTRT
10471 		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10472 		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
10473 	       || (code == ASHIFT
10474 		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10475 		   && !((nonzero_bits (varop, shift_unit_mode) << count)
10476 			& GET_MODE_MASK (shift_unit_mode))))
10477 	      && !side_effects_p (varop))
10478 	    varop = const0_rtx;
10479 	}
10480 
10481       switch (GET_CODE (varop))
10482 	{
10483 	case SIGN_EXTEND:
10484 	case ZERO_EXTEND:
10485 	case SIGN_EXTRACT:
10486 	case ZERO_EXTRACT:
10487 	  new_rtx = expand_compound_operation (varop);
10488 	  if (new_rtx != varop)
10489 	    {
10490 	      varop = new_rtx;
10491 	      continue;
10492 	    }
10493 	  break;
10494 
10495 	case MEM:
10496 	  /* The following rules apply only to scalars.  */
10497 	  if (shift_mode != shift_unit_mode)
10498 	    break;
10499 
10500 	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10501 	     minus the width of a smaller mode, we can do this with a
10502 	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
10503 	  if ((code == ASHIFTRT || code == LSHIFTRT)
10504 	      && ! mode_dependent_address_p (XEXP (varop, 0),
10505 					     MEM_ADDR_SPACE (varop))
10506 	      && ! MEM_VOLATILE_P (varop)
10507 	      && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
10508 					 MODE_INT, 1)) != BLKmode)
10509 	    {
10510 	      new_rtx = adjust_address_nv (varop, tmode,
10511 				       BYTES_BIG_ENDIAN ? 0
10512 				       : count / BITS_PER_UNIT);
10513 
10514 	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10515 				     : ZERO_EXTEND, mode, new_rtx);
10516 	      count = 0;
10517 	      continue;
10518 	    }
10519 	  break;
10520 
10521 	case SUBREG:
10522 	  /* The following rules apply only to scalars.  */
10523 	  if (shift_mode != shift_unit_mode)
10524 	    break;
10525 
10526 	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
10527 	     the same number of words as what we've seen so far.  Then store
10528 	     the widest mode in MODE.  */
10529 	  if (subreg_lowpart_p (varop)
10530 	      && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10531 		  > GET_MODE_SIZE (GET_MODE (varop)))
10532 	      && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10533 				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10534 		 == mode_words
10535 	      && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
10536 	      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
10537 	    {
10538 	      varop = SUBREG_REG (varop);
10539 	      if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
10540 		mode = GET_MODE (varop);
10541 	      continue;
10542 	    }
10543 	  break;
10544 
10545 	case MULT:
10546 	  /* Some machines use MULT instead of ASHIFT because MULT
10547 	     is cheaper.  But it is still better on those machines to
10548 	     merge two shifts into one.  */
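	  /* For example, (mult X (const_int 8)) is rewritten here as
	     (ashift X (const_int 3)), which can then be merged with the
	     outer shift.  */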
10549 	  if (CONST_INT_P (XEXP (varop, 1))
10550 	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10551 	    {
10552 	      varop
10553 		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
10554 				       XEXP (varop, 0),
10555 				       GEN_INT (exact_log2 (
10556 						UINTVAL (XEXP (varop, 1)))));
10557 	      continue;
10558 	    }
10559 	  break;
10560 
10561 	case UDIV:
10562 	  /* Similar, for when divides are cheaper.  */
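	  /* For example, (udiv X (const_int 16)) becomes
	     (lshiftrt X (const_int 4)).  */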
10563 	  if (CONST_INT_P (XEXP (varop, 1))
10564 	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10565 	    {
10566 	      varop
10567 		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10568 				       XEXP (varop, 0),
10569 				       GEN_INT (exact_log2 (
10570 						UINTVAL (XEXP (varop, 1)))));
10571 	      continue;
10572 	    }
10573 	  break;
10574 
10575 	case ASHIFTRT:
10576 	  /* If we are extracting just the sign bit of an arithmetic
10577 	     right shift, that shift is not needed.  However, the sign
10578 	     bit of a wider mode may be different from what would be
10579 	     interpreted as the sign bit in a narrower mode, so, if
10580 	     the result is narrower, don't discard the shift.  */
10581 	  if (code == LSHIFTRT
10582 	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10583 	      && (GET_MODE_UNIT_BITSIZE (result_mode)
10584 		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10585 	    {
10586 	      varop = XEXP (varop, 0);
10587 	      continue;
10588 	    }
10589 
10590 	  /* fall through */
10591 
10592 	case LSHIFTRT:
10593 	case ASHIFT:
10594 	case ROTATE:
10595 	  /* The following rules apply only to scalars.  */
10596 	  if (shift_mode != shift_unit_mode)
10597 	    break;
10598 
10599 	  /* Here we have two nested shifts.  The result is usually the
10600 	     AND of a new shift with a mask.  We compute the result below.  */
10601 	  if (CONST_INT_P (XEXP (varop, 1))
10602 	      && INTVAL (XEXP (varop, 1)) >= 0
10603 	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
10604 	      && HWI_COMPUTABLE_MODE_P (result_mode)
10605 	      && HWI_COMPUTABLE_MODE_P (mode))
10606 	    {
10607 	      enum rtx_code first_code = GET_CODE (varop);
10608 	      unsigned int first_count = INTVAL (XEXP (varop, 1));
10609 	      unsigned HOST_WIDE_INT mask;
10610 	      rtx mask_rtx;
10611 
10612 	      /* We have one common special case.  We can't do any merging if
10613 		 the inner code is an ASHIFTRT of a smaller mode.  However, if
10614 		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10615 		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10616 		 we can convert it to
10617 		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10618 		 This simplifies certain SIGN_EXTEND operations.  */
10619 	      if (code == ASHIFT && first_code == ASHIFTRT
10620 		  && count == (GET_MODE_PRECISION (result_mode)
10621 			       - GET_MODE_PRECISION (GET_MODE (varop))))
10622 		{
10623 		  /* C3 has the low-order C1 bits zero.  */
10624 
10625 		  mask = GET_MODE_MASK (mode)
10626 			 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10627 
10628 		  varop = simplify_and_const_int (NULL_RTX, result_mode,
10629 						  XEXP (varop, 0), mask);
10630 		  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10631 						varop, count);
10632 		  count = first_count;
10633 		  code = ASHIFTRT;
10634 		  continue;
10635 		}
10636 
10637 	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10638 		 than C1 high-order bits equal to the sign bit, we can convert
10639 		 this to either an ASHIFT or an ASHIFTRT depending on the
10640 		 two counts.
10641 
10642 		 We cannot do this if VAROP's mode is not SHIFT_MODE.  */
10643 
10644 	      if (code == ASHIFTRT && first_code == ASHIFT
10645 		  && GET_MODE (varop) == shift_mode
10646 		  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10647 		      > first_count))
10648 		{
10649 		  varop = XEXP (varop, 0);
10650 		  count -= first_count;
10651 		  if (count < 0)
10652 		    {
10653 		      count = -count;
10654 		      code = ASHIFT;
10655 		    }
10656 
10657 		  continue;
10658 		}
10659 
10660 	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
10661 		 we can only do this if FIRST_CODE is also ASHIFTRT.
10662 
10663 		 We can't do the case when CODE is ROTATE and FIRST_CODE is
10664 		 ASHIFTRT.
10665 
10666 		 If the mode of this shift is not the mode of the outer shift,
10667 		 we can't do this if either shift is a right shift or ROTATE.
10668 
10669 		 Finally, we can't do any of these if the mode is too wide
10670 		 unless the codes are the same.
10671 
10672 		 Handle the case where the shift codes are the same
10673 		 first.  */
10674 
10675 	      if (code == first_code)
10676 		{
10677 		  if (GET_MODE (varop) != result_mode
10678 		      && (code == ASHIFTRT || code == LSHIFTRT
10679 			  || code == ROTATE))
10680 		    break;
10681 
10682 		  count += first_count;
10683 		  varop = XEXP (varop, 0);
10684 		  continue;
10685 		}
10686 
10687 	      if (code == ASHIFTRT
10688 		  || (code == ROTATE && first_code == ASHIFTRT)
10689 		  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10690 		  || (GET_MODE (varop) != result_mode
10691 		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
10692 			  || first_code == ROTATE
10693 			  || code == ROTATE)))
10694 		break;
10695 
10696 	      /* To compute the mask to apply after the shift, shift the
10697 		 nonzero bits of the inner shift the same way the
10698 		 outer shift will.  */
10699 
10700 	      mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
10701 				       result_mode);
10702 
10703 	      mask_rtx
10704 		= simplify_const_binary_operation (code, result_mode, mask_rtx,
10705 						   GEN_INT (count));
10706 
10707 	      /* Give up if we can't compute an outer operation to use.  */
10708 	      if (mask_rtx == 0
10709 		  || !CONST_INT_P (mask_rtx)
10710 		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
10711 					INTVAL (mask_rtx),
10712 					result_mode, &complement_p))
10713 		break;
10714 
10715 	      /* If the shifts are in the same direction, we add the
10716 		 counts.  Otherwise, we subtract them.  */
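	      /* For example, an outer ASHIFT by 3 applied to an inner
		 LSHIFTRT by 2 leaves a net shift count of 1, with the mask
		 merged above accounting for the bits the inner shift
		 cleared.  */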
10717 	      if ((code == ASHIFTRT || code == LSHIFTRT)
10718 		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10719 		count += first_count;
10720 	      else
10721 		count -= first_count;
10722 
10723 	      /* If COUNT is positive, the new shift is usually CODE,
10724 		 except for the two exceptions below, in which case it is
10725 		 FIRST_CODE.  If the count is negative, FIRST_CODE should
10726 		 always be used.  */
10727 	      if (count > 0
10728 		  && ((first_code == ROTATE && code == ASHIFT)
10729 		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
10730 		code = first_code;
10731 	      else if (count < 0)
10732 		code = first_code, count = -count;
10733 
10734 	      varop = XEXP (varop, 0);
10735 	      continue;
10736 	    }
10737 
10738 	  /* If we have (A << B << C) for any shift, we can convert this to
10739 	     (A << C << B).  This wins if A is a constant.  Only try this if
10740 	     B is not a constant.  */
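	  /* For example, (ashift (ashift (const_int 3) B) 2) becomes
	     (ashift (const_int 12) B): the constant is shifted by the
	     known count at compile time.  */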
10741 
10742 	  else if (GET_CODE (varop) == code
10743 		   && CONST_INT_P (XEXP (varop, 0))
10744 		   && !CONST_INT_P (XEXP (varop, 1)))
10745 	    {
10746 	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10747 		 sure the result will be masked.  See PR70222.  */
10748 	      if (code == LSHIFTRT
10749 		  && mode != result_mode
10750 		  && !merge_outer_ops (&outer_op, &outer_const, AND,
10751 				       GET_MODE_MASK (result_mode)
10752 				       >> orig_count, result_mode,
10753 				       &complement_p))
10754 		break;
10755 	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
10756 		 up outer sign extension (often left and right shift) is
10757 		 hardly more efficient than the original.  See PR70429.  */
10758 	      if (code == ASHIFTRT && mode != result_mode)
10759 		break;
10760 
10761 	      rtx new_rtx = simplify_const_binary_operation (code, mode,
10762 							     XEXP (varop, 0),
10763 							     GEN_INT (count));
10764 	      varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10765 	      count = 0;
10766 	      continue;
10767 	    }
10768 	  break;
10769 
10770 	case NOT:
10771 	  /* The following rules apply only to scalars.  */
10772 	  if (shift_mode != shift_unit_mode)
10773 	    break;
10774 
10775 	  /* Make this fit the case below.  */
10776 	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10777 	  continue;
10778 
10779 	case IOR:
10780 	case AND:
10781 	case XOR:
10782 	  /* The following rules apply only to scalars.  */
10783 	  if (shift_mode != shift_unit_mode)
10784 	    break;
10785 
10786 	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10787 	     with C the size of VAROP - 1 and the shift is logical if
10788 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10789 	     we have an (le X 0) operation.   If we have an arithmetic shift
10790 	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
10791 	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */
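	  /* The sign bit of (ior (plus X -1) X) is set exactly when X <= 0:
	     for X > 0 both X and X-1 are nonnegative, while for X <= 0 at
	     least one of them is negative.  Shifting that sign bit down
	     yields the flag value.  */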
10792 
10793 	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10794 	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10795 	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10796 	      && (code == LSHIFTRT || code == ASHIFTRT)
10797 	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10798 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10799 	    {
10800 	      count = 0;
10801 	      varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10802 				  const0_rtx);
10803 
10804 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10805 		varop = gen_rtx_NEG (GET_MODE (varop), varop);
10806 
10807 	      continue;
10808 	    }
10809 
10810 	  /* If we have (shift (logical)), move the logical to the outside
10811 	     to allow it to possibly combine with another logical and the
10812 	     shift to combine with another shift.  This also canonicalizes to
10813 	     what a ZERO_EXTRACT looks like.  Also, some machines have
10814 	     (and (shift)) insns.  */
10815 
10816 	  if (CONST_INT_P (XEXP (varop, 1))
10817 	      /* We can't do this if we have (ashiftrt (xor))  and the
10818 		 constant has its sign bit set in shift_mode with shift_mode
10819 		 wider than result_mode.  */
10820 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10821 		   && result_mode != shift_mode
10822 		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10823 					      shift_mode))
10824 	      && (new_rtx = simplify_const_binary_operation
10825 		  (code, result_mode,
10826 		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10827 		   GEN_INT (count))) != 0
10828 	      && CONST_INT_P (new_rtx)
10829 	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10830 				  INTVAL (new_rtx), result_mode, &complement_p))
10831 	    {
10832 	      varop = XEXP (varop, 0);
10833 	      continue;
10834 	    }
10835 
10836 	  /* If we can't do that, try to simplify the shift in each arm of the
10837 	     logical expression, make a new logical expression, and apply
10838 	     the inverse distributive law.  This also can't be done for
10839 	     (ashiftrt (xor)) where we've widened the shift and the constant
10840 	     changes the sign bit.  */
10841 	  if (CONST_INT_P (XEXP (varop, 1))
10842 	     && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10843 		  && result_mode != shift_mode
10844 		  && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10845 					     shift_mode)))
10846 	    {
10847 	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10848 					      XEXP (varop, 0), count);
10849 	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10850 					      XEXP (varop, 1), count);
10851 
10852 	      varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10853 					   lhs, rhs);
10854 	      varop = apply_distributive_law (varop);
10855 
10856 	      count = 0;
10857 	      continue;
10858 	    }
10859 	  break;
10860 
10861 	case EQ:
10862 	  /* The following rules apply only to scalars.  */
10863 	  if (shift_mode != shift_unit_mode)
10864 	    break;
10865 
10866 	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10867 	     says that the sign bit can be tested, FOO has mode MODE, C is
10868 	     GET_MODE_PRECISION (MODE) - 1, and only the low-order bit of FOO
10869 	     may be nonzero.  */
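	  /* With STORE_FLAG_VALUE == -1, (eq FOO 0) is -1 when FOO is 0 and
	     0 when FOO is 1; shifting the sign bit down gives 1 or 0
	     respectively, which is FOO ^ 1.  */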
10870 	  if (code == LSHIFTRT
10871 	      && XEXP (varop, 1) == const0_rtx
10872 	      && GET_MODE (XEXP (varop, 0)) == result_mode
10873 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10874 	      && HWI_COMPUTABLE_MODE_P (result_mode)
10875 	      && STORE_FLAG_VALUE == -1
10876 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10877 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10878 				  &complement_p))
10879 	    {
10880 	      varop = XEXP (varop, 0);
10881 	      count = 0;
10882 	      continue;
10883 	    }
10884 	  break;
10885 
10886 	case NEG:
10887 	  /* The following rules apply only to scalars.  */
10888 	  if (shift_mode != shift_unit_mode)
10889 	    break;
10890 
10891 	  /* (lshiftrt (neg A) C), where A is either 0 or 1 and C is one less
10892 	     than the number of bits in the mode, is equivalent to A.  */
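	  /* The negation of 0 is 0 and the negation of 1 is -1; the logical
	     shift by the mode size minus one turns those back into 0 and 1,
	     i.e. A itself.  */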
10893 	  if (code == LSHIFTRT
10894 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10895 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10896 	    {
10897 	      varop = XEXP (varop, 0);
10898 	      count = 0;
10899 	      continue;
10900 	    }
10901 
10902 	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
10903 	     NEG outside to allow shifts to combine.  */
10904 	  if (code == ASHIFT
10905 	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10906 				  &complement_p))
10907 	    {
10908 	      varop = XEXP (varop, 0);
10909 	      continue;
10910 	    }
10911 	  break;
10912 
10913 	case PLUS:
10914 	  /* The following rules apply only to scalars.  */
10915 	  if (shift_mode != shift_unit_mode)
10916 	    break;
10917 
10918 	  /* (lshiftrt (plus A -1) C), where A is either 0 or 1 and C
10919 	     is one less than the number of bits in the mode, is
10920 	     equivalent to (xor A 1).  */
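	  /* A of 0 gives -1, whose sign bit shifts down to 1; A of 1 gives
	     0, which stays 0 -- exactly (xor A 1).  */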
10921 	  if (code == LSHIFTRT
10922 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10923 	      && XEXP (varop, 1) == constm1_rtx
10924 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10925 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10926 				  &complement_p))
10927 	    {
10928 	      count = 0;
10929 	      varop = XEXP (varop, 0);
10930 	      continue;
10931 	    }
10932 
10933 	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10934 	     that might be nonzero in BAR are those being shifted out and those
10935 	     bits are known zero in FOO, we can replace the PLUS with FOO.
10936 	     Similarly in the other operand order.  This code occurs when
10937 	     we are computing the size of a variable-size array.  */
10938 
10939 	  if ((code == ASHIFTRT || code == LSHIFTRT)
10940 	      && count < HOST_BITS_PER_WIDE_INT
10941 	      && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10942 	      && (nonzero_bits (XEXP (varop, 1), result_mode)
10943 		  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10944 	    {
10945 	      varop = XEXP (varop, 0);
10946 	      continue;
10947 	    }
10948 	  else if ((code == ASHIFTRT || code == LSHIFTRT)
10949 		   && count < HOST_BITS_PER_WIDE_INT
10950 		   && HWI_COMPUTABLE_MODE_P (result_mode)
10951 		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10952 			    >> count)
10953 		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10954 			    & nonzero_bits (XEXP (varop, 1),
10955 						 result_mode)))
10956 	    {
10957 	      varop = XEXP (varop, 1);
10958 	      continue;
10959 	    }
10960 
10961 	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
10962 	  if (code == ASHIFT
10963 	      && CONST_INT_P (XEXP (varop, 1))
10964 	      && (new_rtx = simplify_const_binary_operation
10965 		  (ASHIFT, result_mode,
10966 		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10967 		   GEN_INT (count))) != 0
10968 	      && CONST_INT_P (new_rtx)
10969 	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
10970 				  INTVAL (new_rtx), result_mode, &complement_p))
10971 	    {
10972 	      varop = XEXP (varop, 0);
10973 	      continue;
10974 	    }
10975 
10976 	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10977 	     signbit', and attempt to change the PLUS to an XOR and move it to
10978 	     the outer operation, as is done above in the AND/IOR/XOR case for
10979 	     a shift of a logical operation.  See the logical handling above
10980 	     for the reasoning.  */
10981 	  if (code == LSHIFTRT
10982 	      && CONST_INT_P (XEXP (varop, 1))
10983 	      && mode_signbit_p (result_mode, XEXP (varop, 1))
10984 	      && (new_rtx = simplify_const_binary_operation
10985 		  (code, result_mode,
10986 		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10987 		   GEN_INT (count))) != 0
10988 	      && CONST_INT_P (new_rtx)
10989 	      && merge_outer_ops (&outer_op, &outer_const, XOR,
10990 				  INTVAL (new_rtx), result_mode, &complement_p))
10991 	    {
10992 	      varop = XEXP (varop, 0);
10993 	      continue;
10994 	    }
10995 
10996 	  break;
10997 
10998 	case MINUS:
10999 	  /* The following rules apply only to scalars.  */
11000 	  if (shift_mode != shift_unit_mode)
11001 	    break;
11002 
11003 	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11004 	     with C the size of VAROP - 1 and the shift is logical if
11005 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11006 	     we have a (gt X 0) operation.  If the shift is arithmetic with
11007 	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11008 	     we have a (neg (gt X 0)) operation.  */
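	  /* Here (ashiftrt X C) is -1 when X is negative and 0 otherwise,
	     so the MINUS is negative exactly when X > 0; shifting its sign
	     bit down yields the flag value for (gt X 0).  */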
11009 
11010 	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11011 	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11012 	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
11013 	      && (code == LSHIFTRT || code == ASHIFTRT)
11014 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11015 	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11016 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11017 	    {
11018 	      count = 0;
11019 	      varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
11020 				  const0_rtx);
11021 
11022 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11023 		varop = gen_rtx_NEG (GET_MODE (varop), varop);
11024 
11025 	      continue;
11026 	    }
11027 	  break;
11028 
11029 	case TRUNCATE:
11030 	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11031 	     if the truncate does not affect the value.  */
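	  /* For example, (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3)
	     becomes (truncate:SI (lshiftrt:DI X 35)); the inner shift has
	     already discarded the bits the truncation would drop.  */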
11032 	  if (code == LSHIFTRT
11033 	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11034 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11035 	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
11036 		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11037 		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11038 	    {
11039 	      rtx varop_inner = XEXP (varop, 0);
11040 
11041 	      varop_inner
11042 		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11043 				    XEXP (varop_inner, 0),
11044 				    GEN_INT
11045 				    (count + INTVAL (XEXP (varop_inner, 1))));
11046 	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11047 	      count = 0;
11048 	      continue;
11049 	    }
11050 	  break;
11051 
11052 	default:
11053 	  break;
11054 	}
11055 
11056       break;
11057     }
11058 
11059   shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
11060 				     outer_op, outer_const);
11061 
11062   /* We have now finished analyzing the shift.  The result should be
11063      a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
11064      OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11065      to the result of the shift.  OUTER_CONST is the relevant constant,
11066      but we must turn off all bits turned off in the shift.  */
11067 
11068   if (outer_op == UNKNOWN
11069       && orig_code == code && orig_count == count
11070       && varop == orig_varop
11071       && shift_mode == GET_MODE (varop))
11072     return NULL_RTX;
11073 
11074   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
11075   varop = gen_lowpart (shift_mode, varop);
11076   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11077     return NULL_RTX;
11078 
11079   /* If we have an outer operation and we just made a shift, it is
11080      possible that we could have simplified the shift were it not
11081      for the outer operation.  So try to do the simplification
11082      recursively.  */
11083 
11084   if (outer_op != UNKNOWN)
11085     x = simplify_shift_const_1 (code, shift_mode, varop, count);
11086   else
11087     x = NULL_RTX;
11088 
11089   if (x == NULL_RTX)
11090     x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11091 
11092   /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11093      turn off all the bits that the shift would have turned off.  */
11094   if (orig_code == LSHIFTRT && result_mode != shift_mode)
11095     x = simplify_and_const_int (NULL_RTX, shift_mode, x,
11096 				GET_MODE_MASK (result_mode) >> orig_count);
11097 
11098   /* Do the remainder of the processing in RESULT_MODE.  */
11099   x = gen_lowpart_or_truncate (result_mode, x);
11100 
11101   /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11102      operation.  */
11103   if (complement_p)
11104     x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11105 
11106   if (outer_op != UNKNOWN)
11107     {
11108       if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11109 	  && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
11110 	outer_const = trunc_int_for_mode (outer_const, result_mode);
11111 
11112       if (outer_op == AND)
11113 	x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
11114       else if (outer_op == SET)
11115 	{
11116 	  /* This means that we have determined that the result is
11117 	     equivalent to a constant.  This should be rare.  */
11118 	  if (!side_effects_p (x))
11119 	    x = GEN_INT (outer_const);
11120 	}
11121       else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11122 	x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
11123       else
11124 	x = simplify_gen_binary (outer_op, result_mode, x,
11125 				 GEN_INT (outer_const));
11126     }
11127 
11128   return x;
11129 }
11130 
11131 /* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
11132    The result of the shift is RESULT_MODE.  If we cannot simplify it,
11133    return X or, if it is NULL, synthesize the expression with
11134    simplify_gen_binary.  Otherwise, return a simplified value.
11135 
11136    The shift is normally computed in the widest mode we find in VAROP, as
11137    long as it isn't a different number of words than RESULT_MODE.  Exceptions
11138    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
11139 
11140 static rtx
11141 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11142 		      rtx varop, int count)
11143 {
11144   rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11145   if (tem)
11146     return tem;
11147 
11148   if (!x)
11149     x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11150   if (GET_MODE (x) != result_mode)
11151     x = gen_lowpart (result_mode, x);
11152   return x;
11153 }
11154 
11155 
11156 /* A subroutine of recog_for_combine.  See there for arguments and
11157    return value.  */
11158 
11159 static int
11160 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11161 {
11162   rtx pat = *pnewpat;
11163   rtx pat_without_clobbers;
11164   int insn_code_number;
11165   int num_clobbers_to_add = 0;
11166   int i;
11167   rtx notes = NULL_RTX;
11168   rtx old_notes, old_pat;
11169   int old_icode;
11170 
11171   /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11172      we use to indicate that something didn't match.  If we find such a
11173      thing, force rejection.  */
11174   if (GET_CODE (pat) == PARALLEL)
11175     for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11176       if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11177 	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11178 	return -1;
11179 
11180   old_pat = PATTERN (insn);
11181   old_notes = REG_NOTES (insn);
11182   PATTERN (insn) = pat;
11183   REG_NOTES (insn) = NULL_RTX;
11184 
11185   insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11186   if (dump_file && (dump_flags & TDF_DETAILS))
11187     {
11188       if (insn_code_number < 0)
11189 	fputs ("Failed to match this instruction:\n", dump_file);
11190       else
11191 	fputs ("Successfully matched this instruction:\n", dump_file);
11192       print_rtl_single (dump_file, pat);
11193     }
11194 
11195   /* If the pattern didn't match, there is the possibility that we
11196      previously had an insn that clobbered some register as a side effect,
11197      but the combined insn doesn't need to do that.  So try once more
11198      without the clobbers unless this represents an ASM insn.  */
11199 
11200   if (insn_code_number < 0 && ! check_asm_operands (pat)
11201       && GET_CODE (pat) == PARALLEL)
11202     {
11203       int pos;
11204 
11205       for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11206 	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11207 	  {
11208 	    if (i != pos)
11209 	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11210 	    pos++;
11211 	  }
11212 
11213       SUBST_INT (XVECLEN (pat, 0), pos);
11214 
11215       if (pos == 1)
11216 	pat = XVECEXP (pat, 0, 0);
11217 
11218       PATTERN (insn) = pat;
11219       insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11220       if (dump_file && (dump_flags & TDF_DETAILS))
11221 	{
11222 	  if (insn_code_number < 0)
11223 	    fputs ("Failed to match this instruction:\n", dump_file);
11224 	  else
11225 	    fputs ("Successfully matched this instruction:\n", dump_file);
11226 	  print_rtl_single (dump_file, pat);
11227 	}
11228     }
11229 
11230   pat_without_clobbers = pat;
11231 
11232   PATTERN (insn) = old_pat;
11233   REG_NOTES (insn) = old_notes;
11234 
11235   /* Recognize all noop sets; these will be killed by a followup pass.  */
11236   if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11237     insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11238 
11239   /* If we had any clobbers to add, make a new pattern that contains
11240      them.  Then check to make sure that all of them are dead.  */
11241   if (num_clobbers_to_add)
11242     {
11243       rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11244 				     rtvec_alloc (GET_CODE (pat) == PARALLEL
11245 						  ? (XVECLEN (pat, 0)
11246 						     + num_clobbers_to_add)
11247 						  : num_clobbers_to_add + 1));
11248 
11249       if (GET_CODE (pat) == PARALLEL)
11250 	for (i = 0; i < XVECLEN (pat, 0); i++)
11251 	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11252       else
11253 	XVECEXP (newpat, 0, 0) = pat;
11254 
11255       add_clobbers (newpat, insn_code_number);
11256 
11257       for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11258 	   i < XVECLEN (newpat, 0); i++)
11259 	{
11260 	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11261 	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11262 	    return -1;
11263 	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11264 	    {
11265 	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11266 	      notes = alloc_reg_note (REG_UNUSED,
11267 				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
11268 	    }
11269 	}
11270       pat = newpat;
11271     }
11272 
11273   if (insn_code_number >= 0
11274       && insn_code_number != NOOP_MOVE_INSN_CODE)
11275     {
11276       old_pat = PATTERN (insn);
11277       old_notes = REG_NOTES (insn);
11278       old_icode = INSN_CODE (insn);
11279       PATTERN (insn) = pat;
11280       REG_NOTES (insn) = notes;
11281       INSN_CODE (insn) = insn_code_number;
11282 
11283       /* Allow targets to reject combined insn.  */
11284       if (!targetm.legitimate_combined_insn (insn))
11285 	{
11286 	  if (dump_file && (dump_flags & TDF_DETAILS))
11287 	    fputs ("Instruction not appropriate for target.",
11288 		   dump_file);
11289 
11290 	  /* Callers expect recog_for_combine to strip
11291 	     clobbers from the pattern on failure.  */
11292 	  pat = pat_without_clobbers;
11293 	  notes = NULL_RTX;
11294 
11295 	  insn_code_number = -1;
11296 	}
11297 
11298       PATTERN (insn) = old_pat;
11299       REG_NOTES (insn) = old_notes;
11300       INSN_CODE (insn) = old_icode;
11301     }
11302 
11303   *pnewpat = pat;
11304   *pnotes = notes;
11305 
11306   return insn_code_number;
11307 }
11308 
11309 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11310    expressed as an AND and maybe an LSHIFTRT, to that formulation.
11311    Return whether anything was so changed.  */
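/* For instance, on a !BITS_BIG_ENDIAN target,
   (zero_extract:SI X (const_int 8) (const_int 4)) becomes
   (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)).  */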
11312 
11313 static bool
11314 change_zero_ext (rtx pat)
11315 {
11316   bool changed = false;
11317   rtx *src = &SET_SRC (pat);
11318 
11319   subrtx_ptr_iterator::array_type array;
11320   FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11321     {
11322       rtx x = **iter;
11323       machine_mode mode = GET_MODE (x);
11324       int size;
11325 
11326       if (GET_CODE (x) == ZERO_EXTRACT
11327 	  && CONST_INT_P (XEXP (x, 1))
11328 	  && CONST_INT_P (XEXP (x, 2))
11329 	  && GET_MODE (XEXP (x, 0)) != VOIDmode
11330 	  && GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
11331 	      <= GET_MODE_PRECISION (mode))
11332 	{
11333 	  machine_mode inner_mode = GET_MODE (XEXP (x, 0));
11334 
11335 	  size = INTVAL (XEXP (x, 1));
11336 
11337 	  int start = INTVAL (XEXP (x, 2));
11338 	  if (BITS_BIG_ENDIAN)
11339 	    start = GET_MODE_PRECISION (inner_mode) - size - start;
11340 
11341 	  if (start)
11342 	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
11343 	  else
11344 	    x = XEXP (x, 0);
11345 
11346 	  if (mode != inner_mode)
11347 	    {
11348 	      if (REG_P (x) && HARD_REGISTER_P (x)
11349 		  && !can_change_dest_mode (x, 0, mode))
11350 		continue;
11351 
11352 	      x = gen_lowpart_SUBREG (mode, x);
11353 	    }
11354 	}
11355       else if (GET_CODE (x) == ZERO_EXTEND
11356 	       && SCALAR_INT_MODE_P (mode)
11357 	       && GET_CODE (XEXP (x, 0)) == SUBREG
11358 	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11359 	       && !paradoxical_subreg_p (XEXP (x, 0))
11360 	       && subreg_lowpart_p (XEXP (x, 0)))
11361 	{
11362 	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
11363 	  x = SUBREG_REG (XEXP (x, 0));
11364 	  if (GET_MODE (x) != mode)
11365 	    {
11366 	      if (REG_P (x) && HARD_REGISTER_P (x)
11367 		  && !can_change_dest_mode (x, 0, mode))
11368 		continue;
11369 
11370 	      x = gen_lowpart_SUBREG (mode, x);
11371 	    }
11372 	}
11373       else if (GET_CODE (x) == ZERO_EXTEND
11374 	       && SCALAR_INT_MODE_P (mode)
11375 	       && REG_P (XEXP (x, 0))
11376 	       && HARD_REGISTER_P (XEXP (x, 0))
11377 	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
11378 	{
11379 	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
11380 	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11381 	}
11382       else
11383 	continue;
11384 
11385       if (!(GET_CODE (x) == LSHIFTRT
11386 	    && CONST_INT_P (XEXP (x, 1))
11387 	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11388 	{
11389 	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11390 	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11391 	}
11392 
11393       SUBST (**iter, x);
11394       changed = true;
11395     }
11396 
11397   if (changed)
11398     FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11399       maybe_swap_commutative_operands (**iter);
11400 
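  /* A ZERO_EXTRACT used as the SET_DEST (a bit-field store) is rewritten
     below as a store of the whole register:
     reg = (reg & ~field_mask) | ((src << offset) & field_mask).  */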
11401   rtx *dst = &SET_DEST (pat);
11402   if (GET_CODE (*dst) == ZERO_EXTRACT
11403       && REG_P (XEXP (*dst, 0))
11404       && CONST_INT_P (XEXP (*dst, 1))
11405       && CONST_INT_P (XEXP (*dst, 2)))
11406     {
11407       rtx reg = XEXP (*dst, 0);
11408       int width = INTVAL (XEXP (*dst, 1));
11409       int offset = INTVAL (XEXP (*dst, 2));
11410       machine_mode mode = GET_MODE (reg);
11411       int reg_width = GET_MODE_PRECISION (mode);
11412       if (BITS_BIG_ENDIAN)
11413 	offset = reg_width - width - offset;
11414 
11415       rtx x, y, z, w;
11416       wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11417       wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11418       x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11419       if (offset)
11420 	y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11421       else
11422 	y = SET_SRC (pat);
11423       z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11424       w = gen_rtx_IOR (mode, x, z);
11425       SUBST (SET_DEST (pat), reg);
11426       SUBST (SET_SRC (pat), w);
11427 
11428       changed = true;
11429     }
11430 
11431   return changed;
11432 }
11433 
11434 /* Like recog, but we receive the address of a pointer to a new pattern.
11435    We try to match the rtx that the pointer points to.
11436    If that fails, we may try to modify or replace the pattern,
11437    storing the replacement into the same pointer object.
11438 
11439    Modifications include deletion or addition of CLOBBERs.  If the
11440    instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11441    to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11442    (and undo if that fails).
11443 
11444    PNOTES is a pointer to a location where any REG_UNUSED notes added for
11445    the CLOBBERs are placed.
11446 
11447    The value is the final insn code from the pattern ultimately matched,
11448    or -1.  */
11449 
11450 static int
11451 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11452 {
11453   rtx pat = *pnewpat;
11454   int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11455   if (insn_code_number >= 0 || check_asm_operands (pat))
11456     return insn_code_number;
11457 
11458   void *marker = get_undo_marker ();
11459   bool changed = false;
11460 
11461   if (GET_CODE (pat) == SET)
11462     changed = change_zero_ext (pat);
11463   else if (GET_CODE (pat) == PARALLEL)
11464     {
11465       int i;
11466       for (i = 0; i < XVECLEN (pat, 0); i++)
11467 	{
11468 	  rtx set = XVECEXP (pat, 0, i);
11469 	  if (GET_CODE (set) == SET)
11470 	    changed |= change_zero_ext (set);
11471 	}
11472     }
11473 
11474   if (changed)
11475     {
11476       insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11477 
11478       if (insn_code_number < 0)
11479 	undo_to_marker (marker);
11480     }
11481 
11482   return insn_code_number;
11483 }
11484 
11485 /* Like gen_lowpart_general but for use by combine.  In combine it
11486    is not possible to create any new pseudoregs.  However, it is
11487    safe to create invalid memory addresses, because combine will
11488    try to recognize them and all they will do is make the combine
11489    attempt fail.
11490 
11491    If for some reason this cannot do its job, an rtx
11492    (clobber (const_int 0)) is returned.
11493    An insn containing that will not be recognized.  */
11494 
11495 static rtx
11496 gen_lowpart_for_combine (machine_mode omode, rtx x)
11497 {
11498   machine_mode imode = GET_MODE (x);
11499   unsigned int osize = GET_MODE_SIZE (omode);
11500   unsigned int isize = GET_MODE_SIZE (imode);
11501   rtx result;
11502 
11503   if (omode == imode)
11504     return x;
11505 
11506   /* We can only support MODE being wider than a word if X is a
11507      constant integer or has a mode the same size.  */
11508   if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11509       && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11510     goto fail;
11511 
11512   /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
11513      won't know what to do.  So we will strip off the SUBREG here and
11514      process normally.  */
11515   if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11516     {
11517       x = SUBREG_REG (x);
11518 
11519       /* In case we fall through to the address adjustments further
11520 	 below, update the known mode and size of X (imode and isize),
11521 	 since we just changed X.  */
11522       imode = GET_MODE (x);
11523 
11524       if (imode == omode)
11525 	return x;
11526 
11527       isize = GET_MODE_SIZE (imode);
11528     }
11529 
11530   result = gen_lowpart_common (omode, x);
11531 
11532   if (result)
11533     return result;
11534 
11535   if (MEM_P (x))
11536     {
11537       int offset = 0;
11538 
11539       /* Refuse to work on a volatile memory ref or one with a mode-dependent
11540 	 address.  */
11541       if (MEM_VOLATILE_P (x)
11542 	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11543 	goto fail;
11544 
11545       /* If we want to refer to something bigger than the original memref,
11546 	 generate a paradoxical subreg instead.  That will force a reload
11547 	 of the original memref X.  */
11548       if (isize < osize)
11549 	return gen_rtx_SUBREG (omode, x, 0);
11550 
11551       if (WORDS_BIG_ENDIAN)
11552 	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
11553 
11554       /* Adjust the address so that the address-after-the-data is
11555 	 unchanged.  */
11556       if (BYTES_BIG_ENDIAN)
11557 	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
11558 
11559       return adjust_address_nv (x, omode, offset);
11560     }
11561 
11562   /* If X is a comparison operator, rewrite it in a new mode.  This
11563      probably won't match, but may allow further simplifications.  */
11564   else if (COMPARISON_P (x))
11565     return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11566 
11567   /* If we couldn't simplify X any other way, just enclose it in a
11568      SUBREG.  Normally, this SUBREG won't match, but some patterns may
11569      include an explicit SUBREG or we may simplify it further in combine.  */
11570   else
11571     {
11572       rtx res;
11573 
11574       if (imode == VOIDmode)
11575 	{
11576 	  imode = int_mode_for_mode (omode);
11577 	  x = gen_lowpart_common (imode, x);
11578 	  if (x == NULL)
11579 	    goto fail;
11580 	}
11581       res = lowpart_subreg (omode, x, imode);
11582       if (res)
11583 	return res;
11584     }
11585 
11586  fail:
11587   return gen_rtx_CLOBBER (omode, const0_rtx);
11588 }
11589 
11590 /* Try to simplify a comparison between OP0 and a constant OP1,
11591    where CODE is the comparison code that will be tested, into a
11592    (CODE OP0 const0_rtx) form.
11593 
11594    The result is a possibly different comparison code to use.
11595    *POP1 may be updated.  */
11596 
11597 static enum rtx_code
11598 simplify_compare_const (enum rtx_code code, machine_mode mode,
11599 			rtx op0, rtx *pop1)
11600 {
11601   unsigned int mode_width = GET_MODE_PRECISION (mode);
11602   HOST_WIDE_INT const_op = INTVAL (*pop1);
11603 
11604   /* Get the constant we are comparing against and turn off all bits
11605      not on in our mode.  */
11606   if (mode != VOIDmode)
11607     const_op = trunc_int_for_mode (const_op, mode);
11608 
11609   /* If we are comparing against a constant power of two and the value
11610      being compared can only have that single bit nonzero (e.g., it was
11611      `and'ed with that bit), we can replace this with a comparison
11612      with zero.  */
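  /* For example, if OP0 is known to be (and X 8), then (eq OP0 8) becomes
     (ne OP0 0) and (ne OP0 8) becomes (eq OP0 0).  */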
11613   if (const_op
11614       && (code == EQ || code == NE || code == GE || code == GEU
11615 	  || code == LT || code == LTU)
11616       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11617       && pow2p_hwi (const_op & GET_MODE_MASK (mode))
11618       && (nonzero_bits (op0, mode)
11619 	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
11620     {
11621       code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11622       const_op = 0;
11623     }
11624 
11625   /* Similarly, if we are comparing a value known to be either -1 or
11626      0 with -1, change it to the opposite comparison against zero.  */
11627   if (const_op == -1
11628       && (code == EQ || code == NE || code == GT || code == LE
11629 	  || code == GEU || code == LTU)
11630       && num_sign_bit_copies (op0, mode) == mode_width)
11631     {
11632       code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11633       const_op = 0;
11634     }
11635 
11636   /* Do some canonicalizations based on the comparison code.  We prefer
11637      comparisons against zero and then prefer equality comparisons.
11638      If we can reduce the size of a constant, we will do that too.  */
11639   switch (code)
11640     {
11641     case LT:
11642       /* < C is equivalent to <= (C - 1) */
11643       if (const_op > 0)
11644 	{
11645 	  const_op -= 1;
11646 	  code = LE;
11647 	  /* ... fall through to LE case below.  */
11648 	  gcc_fallthrough ();
11649 	}
11650       else
11651 	break;
11652 
11653     case LE:
11654       /* <= C is equivalent to < (C + 1); we do this for C < 0.  */
11655       if (const_op < 0)
11656 	{
11657 	  const_op += 1;
11658 	  code = LT;
11659 	}
11660 
11661       /* If we are doing a <= 0 comparison on a value known to have
11662 	 a zero sign bit, we can replace this with == 0.  */
11663       else if (const_op == 0
11664 	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11665 	       && (nonzero_bits (op0, mode)
11666 		   & (HOST_WIDE_INT_1U << (mode_width - 1)))
11667 	       == 0)
11668 	code = EQ;
11669       break;
11670 
11671     case GE:
11672       /* >= C is equivalent to > (C - 1).  */
11673       if (const_op > 0)
11674 	{
11675 	  const_op -= 1;
11676 	  code = GT;
11677 	  /* ... fall through to GT below.  */
11678 	  gcc_fallthrough ();
11679 	}
11680       else
11681 	break;
11682 
11683     case GT:
11684       /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
11685       if (const_op < 0)
11686 	{
11687 	  const_op += 1;
11688 	  code = GE;
11689 	}
11690 
11691       /* If we are doing a > 0 comparison on a value known to have
11692 	 a zero sign bit, we can replace this with != 0.  */
11693       else if (const_op == 0
11694 	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11695 	       && (nonzero_bits (op0, mode)
11696 		   & (HOST_WIDE_INT_1U << (mode_width - 1)))
11697 	       == 0)
11698 	code = NE;
11699       break;
11700 
11701     case LTU:
11702       /* < C is equivalent to <= (C - 1).  */
11703       if (const_op > 0)
11704 	{
11705 	  const_op -= 1;
11706 	  code = LEU;
11707 	  /* ... fall through ...  */
11708 	}
11709       /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
11710       else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11711 	       && (unsigned HOST_WIDE_INT) const_op
11712 	       == HOST_WIDE_INT_1U << (mode_width - 1))
11713 	{
11714 	  const_op = 0;
11715 	  code = GE;
11716 	  break;
11717 	}
11718       else
11719 	break;
11720 
11721     case LEU:
11722       /* unsigned <= 0 is equivalent to == 0 */
11723       if (const_op == 0)
11724 	code = EQ;
11725       /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
11726       else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11727 	       && (unsigned HOST_WIDE_INT) const_op
11728 	       == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
11729 	{
11730 	  const_op = 0;
11731 	  code = GE;
11732 	}
11733       break;
11734 
11735     case GEU:
11736       /* >= C is equivalent to > (C - 1).  */
11737       if (const_op > 1)
11738 	{
11739 	  const_op -= 1;
11740 	  code = GTU;
11741 	  /* ... fall through ...  */
11742 	}
11743 
11744       /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
11745       else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11746 	       && (unsigned HOST_WIDE_INT) const_op
11747 	       == HOST_WIDE_INT_1U << (mode_width - 1))
11748 	{
11749 	  const_op = 0;
11750 	  code = LT;
11751 	  break;
11752 	}
11753       else
11754 	break;
11755 
11756     case GTU:
11757       /* unsigned > 0 is equivalent to != 0 */
11758       if (const_op == 0)
11759 	code = NE;
11760       /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
11761       else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11762 	       && (unsigned HOST_WIDE_INT) const_op
11763 	       == (HOST_WIDE_INT_1U << (mode_width - 1)) - 1)
11764 	{
11765 	  const_op = 0;
11766 	  code = LT;
11767 	}
11768       break;
11769 
11770     default:
11771       break;
11772     }
11773 
11774   *pop1 = GEN_INT (const_op);
11775   return code;
11776 }
11777 
11778 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11779    comparison code that will be tested.
11780 
11781    The result is a possibly different comparison code to use.  *POP0 and
11782    *POP1 may be updated.
11783 
11784    It is possible that we might detect that a comparison is either always
11785    true or always false.  However, we do not perform general constant
11786    folding in combine, so this knowledge isn't useful.  Such tautologies
11787    should have been detected earlier.  Hence we ignore all such cases.  */
11788 
11789 static enum rtx_code
11790 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11791 {
11792   rtx op0 = *pop0;
11793   rtx op1 = *pop1;
11794   rtx tem, tem1;
11795   int i;
11796   machine_mode mode, tmode;
11797 
11798   /* Try a few ways of applying the same transformation to both operands.  */
11799   while (1)
11800     {
11801       /* The test below this one won't handle SIGN_EXTENDs on these machines,
11802 	 so check specially.  */
11803       if (!WORD_REGISTER_OPERATIONS
11804 	  && code != GTU && code != GEU && code != LTU && code != LEU
11805 	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11806 	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
11807 	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
11808 	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11809 	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11810 	  && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
11811 	      == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
11812 	  && CONST_INT_P (XEXP (op0, 1))
11813 	  && XEXP (op0, 1) == XEXP (op1, 1)
11814 	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11815 	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11816 	  && (INTVAL (XEXP (op0, 1))
11817 	      == (GET_MODE_PRECISION (GET_MODE (op0))
11818 		  - (GET_MODE_PRECISION
11819 		     (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
11820 	{
11821 	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11822 	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11823 	}
11824 
11825       /* If both operands are the same constant shift, see if we can ignore the
11826 	 shift.  We can if the shift is a rotate or if the bits shifted out of
11827 	 this shift are known to be zero for both inputs and if the type of
11828 	 comparison is compatible with the shift.  */
11829       if (GET_CODE (op0) == GET_CODE (op1)
11830 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11831 	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11832 	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11833 		  && (code != GT && code != LT && code != GE && code != LE))
11834 	      || (GET_CODE (op0) == ASHIFTRT
11835 		  && (code != GTU && code != LTU
11836 		      && code != GEU && code != LEU)))
11837 	  && CONST_INT_P (XEXP (op0, 1))
11838 	  && INTVAL (XEXP (op0, 1)) >= 0
11839 	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11840 	  && XEXP (op0, 1) == XEXP (op1, 1))
11841 	{
11842 	  machine_mode mode = GET_MODE (op0);
11843 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11844 	  int shift_count = INTVAL (XEXP (op0, 1));
11845 
11846 	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11847 	    mask &= (mask >> shift_count) << shift_count;
11848 	  else if (GET_CODE (op0) == ASHIFT)
11849 	    mask = (mask & (mask << shift_count)) >> shift_count;
11850 
11851 	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11852 	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11853 	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11854 	  else
11855 	    break;
11856 	}
11857 
11858       /* If both operands are AND's of a paradoxical SUBREG by constant, the
11859 	 SUBREGs are of the same mode, and, in both cases, the AND would
11860 	 be redundant if the comparison was done in the narrower mode,
11861 	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11862 	 and the operand's possibly nonzero bits are 0xffffff01; in that case
11863 	 if we only care about QImode, we don't need the AND).  This case
11864 	 occurs if the output mode of an scc insn is not SImode and
11865 	 STORE_FLAG_VALUE == 1 (e.g., the 386).
11866 
11867 	 Similarly, check for a case where the AND's are ZERO_EXTEND
11868 	 operations from some narrower mode even though a SUBREG is not
11869 	 present.  */
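            /* For example, (and:SI X (const_int 0xff)) compared against
      	 (and:SI Y (const_int 0xff)) can be done as an unsigned QImode
      	 comparison of the low parts of X and Y.  */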
11870 
11871       else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11872 	       && CONST_INT_P (XEXP (op0, 1))
11873 	       && CONST_INT_P (XEXP (op1, 1)))
11874 	{
11875 	  rtx inner_op0 = XEXP (op0, 0);
11876 	  rtx inner_op1 = XEXP (op1, 0);
11877 	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11878 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11879 	  int changed = 0;
11880 
11881 	  if (paradoxical_subreg_p (inner_op0)
11882 	      && GET_CODE (inner_op1) == SUBREG
11883 	      && (GET_MODE (SUBREG_REG (inner_op0))
11884 		  == GET_MODE (SUBREG_REG (inner_op1)))
11885 	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11886 		  <= HOST_BITS_PER_WIDE_INT)
11887 	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11888 					     GET_MODE (SUBREG_REG (inner_op0)))))
11889 	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11890 					     GET_MODE (SUBREG_REG (inner_op1))))))
11891 	    {
11892 	      op0 = SUBREG_REG (inner_op0);
11893 	      op1 = SUBREG_REG (inner_op1);
11894 
11895 	      /* The resulting comparison is always unsigned since we masked
11896 		 off the original sign bit.  */
11897 	      code = unsigned_condition (code);
11898 
11899 	      changed = 1;
11900 	    }
11901 
11902 	  else if (c0 == c1)
11903 	    for (tmode = GET_CLASS_NARROWEST_MODE
11904 		 (GET_MODE_CLASS (GET_MODE (op0)));
11905 		 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11906 	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11907 		{
11908 		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
11909 		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
11910 		  code = unsigned_condition (code);
11911 		  changed = 1;
11912 		  break;
11913 		}
11914 
11915 	  if (! changed)
11916 	    break;
11917 	}
11918 
11919       /* If both operands are NOT, we can strip off the outer operation
11920 	 and adjust the comparison code for swapped operands; similarly for
11921 	 NEG, except that this must be an equality comparison.  */
11922       else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11923 	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11924 		   && (code == EQ || code == NE)))
11925 	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11926 
11927       else
11928 	break;
11929     }
11930 
11931   /* If the first operand is a constant, swap the operands and adjust the
11932      comparison code appropriately, but don't do this if the second operand
11933      is already a constant integer.  */
11934   if (swap_commutative_operands_p (op0, op1))
11935     {
11936       std::swap (op0, op1);
11937       code = swap_condition (code);
11938     }
11939 
11940   /* We now enter a loop during which we will try to simplify the comparison.
11941      For the most part, we only are concerned with comparisons with zero,
11942      but some things may really be comparisons with zero but not start
11943      out looking that way.  */
11944 
11945   while (CONST_INT_P (op1))
11946     {
11947       machine_mode mode = GET_MODE (op0);
11948       unsigned int mode_width = GET_MODE_PRECISION (mode);
11949       unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11950       int equality_comparison_p;
11951       int sign_bit_comparison_p;
11952       int unsigned_comparison_p;
11953       HOST_WIDE_INT const_op;
11954 
11955       /* We only want to handle integral modes.  This catches VOIDmode,
11956 	 CCmode, and the floating-point modes.  An exception is that we
11957 	 can handle VOIDmode if OP0 is a COMPARE or a comparison
11958 	 operation.  */
11959 
11960       if (GET_MODE_CLASS (mode) != MODE_INT
11961 	  && ! (mode == VOIDmode
11962 		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11963 	break;
11964 
11965       /* Try to simplify the compare to constant, possibly changing the
11966 	 comparison op, and/or changing op1 to zero.  */
11967       code = simplify_compare_const (code, mode, op0, &op1);
11968       const_op = INTVAL (op1);
11969 
11970       /* Compute some predicates to simplify code below.  */
11971 
11972       equality_comparison_p = (code == EQ || code == NE);
11973       sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11974       unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11975 			       || code == GEU);
11976 
11977       /* If this is a sign bit comparison and we can do arithmetic in
11978 	 MODE, say that we will only be needing the sign bit of OP0.  */
11979       if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11980 	op0 = force_to_mode (op0, mode,
11981 			     HOST_WIDE_INT_1U
11982 			     << (GET_MODE_PRECISION (mode) - 1),
11983 			     0);
11984 
11985       /* Now try cases based on the opcode of OP0.  If none of the cases
11986 	 does a "continue", we exit this loop immediately after the
11987 	 switch.  */
11988 
11989       switch (GET_CODE (op0))
11990 	{
11991 	case ZERO_EXTRACT:
11992 	  /* If we are extracting a single bit from a variable position in
11993 	     a constant that has only a single bit set and are comparing it
11994 	     with zero, we can convert this into an equality comparison
11995 	     between the position and the location of the single bit.  */
11996 	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11997 	     have already reduced the shift count modulo the word size.  */
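      	  /* For example, (ne (zero_extract (const_int 4) (const_int 1) POS)
      	     (const_int 0)) becomes (eq POS (const_int 2)), with the bit
      	     number counted from the other end when BITS_BIG_ENDIAN.  */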
11998 	  if (!SHIFT_COUNT_TRUNCATED
11999 	      && CONST_INT_P (XEXP (op0, 0))
12000 	      && XEXP (op0, 1) == const1_rtx
12001 	      && equality_comparison_p && const_op == 0
12002 	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12003 	    {
12004 	      if (BITS_BIG_ENDIAN)
12005 		i = BITS_PER_WORD - 1 - i;
12006 
12007 	      op0 = XEXP (op0, 2);
12008 	      op1 = GEN_INT (i);
12009 	      const_op = i;
12010 
12011 	      /* Result is nonzero iff shift count is equal to I.  */
12012 	      code = reverse_condition (code);
12013 	      continue;
12014 	    }
12015 
12016 	  /* fall through */
12017 
12018 	case SIGN_EXTRACT:
12019 	  tem = expand_compound_operation (op0);
12020 	  if (tem != op0)
12021 	    {
12022 	      op0 = tem;
12023 	      continue;
12024 	    }
12025 	  break;
12026 
12027 	case NOT:
12028 	  /* If testing for equality, we can take the NOT of the constant.  */
12029 	  if (equality_comparison_p
12030 	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12031 	    {
12032 	      op0 = XEXP (op0, 0);
12033 	      op1 = tem;
12034 	      continue;
12035 	    }
12036 
12037 	  /* If just looking at the sign bit, reverse the sense of the
12038 	     comparison.  */
12039 	  if (sign_bit_comparison_p)
12040 	    {
12041 	      op0 = XEXP (op0, 0);
12042 	      code = (code == GE ? LT : GE);
12043 	      continue;
12044 	    }
12045 	  break;
12046 
12047 	case NEG:
12048 	  /* If testing for equality, we can take the NEG of the constant.  */
12049 	  if (equality_comparison_p
12050 	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12051 	    {
12052 	      op0 = XEXP (op0, 0);
12053 	      op1 = tem;
12054 	      continue;
12055 	    }
12056 
12057 	  /* The remaining cases only apply to comparisons with zero.  */
12058 	  if (const_op != 0)
12059 	    break;
12060 
12061 	  /* When X is ABS or is known positive,
12062 	     (neg X) is < 0 if and only if X != 0.  */
12063 
12064 	  if (sign_bit_comparison_p
12065 	      && (GET_CODE (XEXP (op0, 0)) == ABS
12066 		  || (mode_width <= HOST_BITS_PER_WIDE_INT
12067 		      && (nonzero_bits (XEXP (op0, 0), mode)
12068 			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
12069 			 == 0)))
12070 	    {
12071 	      op0 = XEXP (op0, 0);
12072 	      code = (code == LT ? NE : EQ);
12073 	      continue;
12074 	    }
12075 
12076 	  /* If we have NEG of something whose two high-order bits are the
12077 	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
12078 	  if (num_sign_bit_copies (op0, mode) >= 2)
12079 	    {
12080 	      op0 = XEXP (op0, 0);
12081 	      code = swap_condition (code);
12082 	      continue;
12083 	    }
12084 	  break;
12085 
12086 	case ROTATE:
12087 	  /* If we are testing equality and our count is a constant, we
12088 	     can perform the inverse operation on our RHS.  */
12089 	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12090 	      && (tem = simplify_binary_operation (ROTATERT, mode,
12091 						   op1, XEXP (op0, 1))) != 0)
12092 	    {
12093 	      op0 = XEXP (op0, 0);
12094 	      op1 = tem;
12095 	      continue;
12096 	    }
12097 
12098 	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12099 	     a particular bit.  Convert it to an AND of a constant of that
12100 	     bit.  This will be converted into a ZERO_EXTRACT.  */
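      	  /* For example, in SImode (lt (rotate X (const_int 1)) (const_int 0))
      	     tests bit 30 of X and becomes
      	     (ne (and X (const_int 0x40000000)) (const_int 0)).  */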
12101 	  if (const_op == 0 && sign_bit_comparison_p
12102 	      && CONST_INT_P (XEXP (op0, 1))
12103 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
12104 	    {
12105 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12106 					    (HOST_WIDE_INT_1U
12107 					     << (mode_width - 1
12108 						 - INTVAL (XEXP (op0, 1)))));
12109 	      code = (code == LT ? NE : EQ);
12110 	      continue;
12111 	    }
12112 
12113 	  /* Fall through.  */
12114 
12115 	case ABS:
12116 	  /* ABS is ignorable inside an equality comparison with zero.  */
12117 	  if (const_op == 0 && equality_comparison_p)
12118 	    {
12119 	      op0 = XEXP (op0, 0);
12120 	      continue;
12121 	    }
12122 	  break;
12123 
12124 	case SIGN_EXTEND:
12125 	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12126 	     (compare FOO CONST) if CONST fits in FOO's mode and we
12127 	     are either testing inequality or have an unsigned
12128 	     comparison with ZERO_EXTEND or a signed comparison with
12129 	     SIGN_EXTEND.  But don't do it if we don't have a compare
12130 	     insn of the given mode, since we'd have to revert it
12131 	     later on, and then we wouldn't know whether to sign- or
12132 	     zero-extend.  */
12133 	  mode = GET_MODE (XEXP (op0, 0));
12134 	  if (GET_MODE_CLASS (mode) == MODE_INT
12135 	      && ! unsigned_comparison_p
12136 	      && HWI_COMPUTABLE_MODE_P (mode)
12137 	      && trunc_int_for_mode (const_op, mode) == const_op
12138 	      && have_insn_for (COMPARE, mode))
12139 	    {
12140 	      op0 = XEXP (op0, 0);
12141 	      continue;
12142 	    }
12143 	  break;
12144 
12145 	case SUBREG:
12146 	  /* Check for the case where we are comparing A - C1 with C2, that is
12147 
12148 	       (subreg:MODE (plus (A) (-C1))) op (C2)
12149 
12150 	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
12151 	     comparison in the wider mode.  One of the following two conditions
12152 	     must be true in order for this to be valid:
12153 
12154 	       1. The mode extension results in the same bit pattern being added
12155 		  on both sides and the comparison is equality or unsigned.  As
12156 		  C2 has been truncated to fit in MODE, the pattern can only be
12157 		  all 0s or all 1s.
12158 
12159 	       2. The mode extension results in the sign bit being copied on
12160 		  each side.
12161 
12162 	     The difficulty here is that we have predicates for A but not for
12163 	     (A - C1), so we need to check that C1 is within proper bounds so
12164 	     as to perturb A as little as possible.  */
12165 
12166 	  if (mode_width <= HOST_BITS_PER_WIDE_INT
12167 	      && subreg_lowpart_p (op0)
12168 	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
12169 	      && GET_CODE (SUBREG_REG (op0)) == PLUS
12170 	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12171 	    {
12172 	      machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
12173 	      rtx a = XEXP (SUBREG_REG (op0), 0);
12174 	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12175 
12176 	      if ((c1 > 0
12177 		   && (unsigned HOST_WIDE_INT) c1
12178 		       < HOST_WIDE_INT_1U << (mode_width - 1)
12179 		   && (equality_comparison_p || unsigned_comparison_p)
12180 		   /* (A - C1) zero-extends if it is positive and sign-extends
12181 		      if it is negative, C2 both zero- and sign-extends.  */
12182 		   && ((0 == (nonzero_bits (a, inner_mode)
12183 			      & ~GET_MODE_MASK (mode))
12184 			&& const_op >= 0)
12185 		       /* (A - C1) sign-extends if it is positive and 1-extends
12186 			  if it is negative, C2 both sign- and 1-extends.  */
12187 		       || (num_sign_bit_copies (a, inner_mode)
12188 			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12189 					     - mode_width)
12190 			   && const_op < 0)))
12191 		  || ((unsigned HOST_WIDE_INT) c1
12192 		       < HOST_WIDE_INT_1U << (mode_width - 2)
12193 		      /* (A - C1) always sign-extends, like C2.  */
12194 		      && num_sign_bit_copies (a, inner_mode)
12195 			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12196 					   - (mode_width - 1))))
12197 		{
12198 		  op0 = SUBREG_REG (op0);
12199 		  continue;
12200 		}
12201 	    }
12202 
12203 	  /* If the inner mode is narrower and we are extracting the low part,
12204 	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
12205 	  if (subreg_lowpart_p (op0)
12206 	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
12207 	    ;
12208 	  else if (subreg_lowpart_p (op0)
12209 		   && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12210 		   && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12211 		   && (code == NE || code == EQ)
12212 		   && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12213 		       <= HOST_BITS_PER_WIDE_INT)
12214 		   && !paradoxical_subreg_p (op0)
12215 		   && (nonzero_bits (SUBREG_REG (op0),
12216 				     GET_MODE (SUBREG_REG (op0)))
12217 		       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12218 	    {
12219 	      /* Remove outer subregs that don't do anything.  */
12220 	      tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12221 
12222 	      if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12223 		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12224 		{
12225 		  op0 = SUBREG_REG (op0);
12226 		  op1 = tem;
12227 		  continue;
12228 		}
12229 	      break;
12230 	    }
12231 	  else
12232 	    break;
12233 
12234 	  /* FALLTHROUGH */
12235 
12236 	case ZERO_EXTEND:
12237 	  mode = GET_MODE (XEXP (op0, 0));
12238 	  if (GET_MODE_CLASS (mode) == MODE_INT
12239 	      && (unsigned_comparison_p || equality_comparison_p)
12240 	      && HWI_COMPUTABLE_MODE_P (mode)
12241 	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12242 	      && const_op >= 0
12243 	      && have_insn_for (COMPARE, mode))
12244 	    {
12245 	      op0 = XEXP (op0, 0);
12246 	      continue;
12247 	    }
12248 	  break;
12249 
12250 	case PLUS:
12251 	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
12252 	     this for equality comparisons due to pathological cases involving
12253 	     overflows.  */
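      	  /* For example, (eq (plus X (const_int 3)) (const_int 7))
      	     becomes (eq X (const_int 4)).  */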
12254 	  if (equality_comparison_p
12255 	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
12256 							op1, XEXP (op0, 1))))
12257 	    {
12258 	      op0 = XEXP (op0, 0);
12259 	      op1 = tem;
12260 	      continue;
12261 	    }
12262 
12263 	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
12264 	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12265 	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12266 	    {
12267 	      op0 = XEXP (XEXP (op0, 0), 0);
12268 	      code = (code == LT ? EQ : NE);
12269 	      continue;
12270 	    }
12271 	  break;
12272 
12273 	case MINUS:
12274 	  /* We used to optimize signed comparisons against zero, but that
12275 	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
12276 	     arrive here as equality comparisons, or (GEU, LTU) are
12277 	     optimized away.  No need to special-case them.  */
12278 
12279 	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
12280 	     (eq B (minus A C)), whichever simplifies.  We can only do
12281 	     this for equality comparisons due to pathological cases involving
12282 	     overflows.  */
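      	  /* For example, (eq (minus X (const_int 5)) (const_int 2))
      	     becomes (eq X (const_int 7)).  */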
12283 	  if (equality_comparison_p
12284 	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
12285 							XEXP (op0, 1), op1)))
12286 	    {
12287 	      op0 = XEXP (op0, 0);
12288 	      op1 = tem;
12289 	      continue;
12290 	    }
12291 
12292 	  if (equality_comparison_p
12293 	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
12294 							XEXP (op0, 0), op1)))
12295 	    {
12296 	      op0 = XEXP (op0, 1);
12297 	      op1 = tem;
12298 	      continue;
12299 	    }
12300 
12301 	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12302 	     of bits in X minus 1, is one iff X > 0.  */
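      	  /* So in SImode (lt (minus (ashiftrt X (const_int 31)) X)
      	     (const_int 0)) becomes (gt X (const_int 0)).  */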
12303 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12304 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12305 	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12306 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12307 	    {
12308 	      op0 = XEXP (op0, 1);
12309 	      code = (code == GE ? LE : GT);
12310 	      continue;
12311 	    }
12312 	  break;
12313 
12314 	case XOR:
12315 	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
12316 	     if C is zero or B is a constant.  */
12317 	  if (equality_comparison_p
12318 	      && 0 != (tem = simplify_binary_operation (XOR, mode,
12319 							XEXP (op0, 1), op1)))
12320 	    {
12321 	      op0 = XEXP (op0, 0);
12322 	      op1 = tem;
12323 	      continue;
12324 	    }
12325 	  break;
12326 
12327 	case EQ:  case NE:
12328 	case UNEQ:  case LTGT:
12329 	case LT:  case LTU:  case UNLT:  case LE:  case LEU:  case UNLE:
12330 	case GT:  case GTU:  case UNGT:  case GE:  case GEU:  case UNGE:
12331 	case UNORDERED: case ORDERED:
12332 	  /* We can't do anything if OP0 is a condition code value, rather
12333 	     than an actual data value.  */
12334 	  if (const_op != 0
12335 	      || CC0_P (XEXP (op0, 0))
12336 	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12337 	    break;
12338 
12339 	  /* Get the two operands being compared.  */
12340 	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12341 	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12342 	  else
12343 	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12344 
12345 	  /* Check for the cases where we simply want the result of the
12346 	     earlier test or the opposite of that result.  */
12347 	  if (code == NE || code == EQ
12348 	      || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
12349 		  && (code == LT || code == GE)))
12350 	    {
12351 	      enum rtx_code new_code;
12352 	      if (code == LT || code == NE)
12353 		new_code = GET_CODE (op0);
12354 	      else
12355 		new_code = reversed_comparison_code (op0, NULL);
12356 
12357 	      if (new_code != UNKNOWN)
12358 		{
12359 		  code = new_code;
12360 		  op0 = tem;
12361 		  op1 = tem1;
12362 		  continue;
12363 		}
12364 	    }
12365 	  break;
12366 
12367 	case IOR:
12368 	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12369 	     iff X <= 0.  */
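      	  /* So in SImode (lt (ior (plus X (const_int -1)) X) (const_int 0))
      	     becomes (le X (const_int 0)).  */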
12370 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12371 	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12372 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12373 	    {
12374 	      op0 = XEXP (op0, 1);
12375 	      code = (code == GE ? GT : LE);
12376 	      continue;
12377 	    }
12378 	  break;
12379 
12380 	case AND:
12381 	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
12382 	     will be converted to a ZERO_EXTRACT later.  */
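      	  /* Both forms, compared against zero, test whether bit X of Y
      	     is clear (for EQ) or set (for NE).  */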
12383 	  if (const_op == 0 && equality_comparison_p
12384 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
12385 	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12386 	    {
12387 	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12388 				      XEXP (XEXP (op0, 0), 1));
12389 	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12390 	      continue;
12391 	    }
12392 
12393 	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12394 	     zero and X is a comparison and C1 and C2 describe only bits set
12395 	     in STORE_FLAG_VALUE, we can compare with X.  */
12396 	  if (const_op == 0 && equality_comparison_p
12397 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12398 	      && CONST_INT_P (XEXP (op0, 1))
12399 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12400 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12401 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12402 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12403 	    {
12404 	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12405 		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
12406 	      if ((~STORE_FLAG_VALUE & mask) == 0
12407 		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12408 		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12409 			  && COMPARISON_P (tem))))
12410 		{
12411 		  op0 = XEXP (XEXP (op0, 0), 0);
12412 		  continue;
12413 		}
12414 	    }
12415 
12416 	  /* If we are doing an equality comparison of an AND of a bit equal
12417 	     to the sign bit, replace this with a LT or GE comparison of
12418 	     the underlying value.  */
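      	  /* For example, in SImode (eq (and X (const_int 0x80000000))
      	     (const_int 0)) becomes (ge X (const_int 0)).  */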
12419 	  if (equality_comparison_p
12420 	      && const_op == 0
12421 	      && CONST_INT_P (XEXP (op0, 1))
12422 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12423 	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12424 		  == HOST_WIDE_INT_1U << (mode_width - 1)))
12425 	    {
12426 	      op0 = XEXP (op0, 0);
12427 	      code = (code == EQ ? GE : LT);
12428 	      continue;
12429 	    }
12430 
12431 	  /* If this AND operation is really a ZERO_EXTEND from a narrower
12432 	     mode, the constant fits within that mode, and this is either an
12433 	     equality or unsigned comparison, try to do this comparison in
12434 	     the narrower mode.
12435 
12436 	     Note that in:
12437 
12438 	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12439 	     -> (ne:DI (reg:SI 4) (const_int 0))
12440 
12441 	     unless TRULY_NOOP_TRUNCATION allows it or the register is
12442 	     known to hold a value of the required mode, the
12443 	     transformation is invalid.  */
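      	  /* For example, (ltu (and:SI X (const_int 0xff)) (const_int 16))
      	     can be done as an unsigned QImode comparison of the low byte
      	     of X against 16.  */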
12444 	  if ((equality_comparison_p || unsigned_comparison_p)
12445 	      && CONST_INT_P (XEXP (op0, 1))
12446 	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12447 				   & GET_MODE_MASK (mode))
12448 				  + 1)) >= 0
12449 	      && const_op >> i == 0
12450 	      && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
12451 	    {
12452 	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12453 	      continue;
12454 	    }
12455 
12456 	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12457 	     fits in both M1 and M2 and the SUBREG is either paradoxical
12458 	     or represents the low part, permute the SUBREG and the AND
12459 	     and try again.  */
12460 	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
12461 	      && CONST_INT_P (XEXP (op0, 1)))
12462 	    {
12463 	      tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
12464 	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12465 	      /* Require an integral mode, to avoid creating something like
12466 		 (AND:SF ...).  */
12467 	      if (SCALAR_INT_MODE_P (tmode)
12468 		  /* It is unsafe to commute the AND into the SUBREG if the
12469 		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12470 		     not defined.  As originally written the upper bits
12471 		     have a defined value due to the AND operation.
12472 		     However, if we commute the AND inside the SUBREG then
12473 		     they no longer have defined values and the meaning of
12474 		     the code has been changed.
12475 		     Also C1 should not change value in the smaller mode,
12476 		     see PR67028 (a positive C1 can become negative in the
12477 		     smaller mode, so that the AND does no longer mask the
12478 		     upper bits).  */
12479 		  && ((WORD_REGISTER_OPERATIONS
12480 		       && mode_width > GET_MODE_PRECISION (tmode)
12481 		       && mode_width <= BITS_PER_WORD
12482 		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12483 		      || (mode_width <= GET_MODE_PRECISION (tmode)
12484 			  && subreg_lowpart_p (XEXP (op0, 0))))
12485 		  && mode_width <= HOST_BITS_PER_WIDE_INT
12486 		  && HWI_COMPUTABLE_MODE_P (tmode)
12487 		  && (c1 & ~mask) == 0
12488 		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
12489 		  && c1 != mask
12490 		  && c1 != GET_MODE_MASK (tmode))
12491 		{
12492 		  op0 = simplify_gen_binary (AND, tmode,
12493 					     SUBREG_REG (XEXP (op0, 0)),
12494 					     gen_int_mode (c1, tmode));
12495 		  op0 = gen_lowpart (mode, op0);
12496 		  continue;
12497 		}
12498 	    }
12499 
12500 	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
12501 	  if (const_op == 0 && equality_comparison_p
12502 	      && XEXP (op0, 1) == const1_rtx
12503 	      && GET_CODE (XEXP (op0, 0)) == NOT)
12504 	    {
12505 	      op0 = simplify_and_const_int (NULL_RTX, mode,
12506 					    XEXP (XEXP (op0, 0), 0), 1);
12507 	      code = (code == NE ? EQ : NE);
12508 	      continue;
12509 	    }
12510 
12511 	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12512 	     (eq (and (lshiftrt X) 1) 0).
12513 	     Also handle the case where (not X) is expressed using xor.  */
12514 	  if (const_op == 0 && equality_comparison_p
12515 	      && XEXP (op0, 1) == const1_rtx
12516 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12517 	    {
12518 	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
12519 	      rtx shift_count = XEXP (XEXP (op0, 0), 1);
12520 
12521 	      if (GET_CODE (shift_op) == NOT
12522 		  || (GET_CODE (shift_op) == XOR
12523 		      && CONST_INT_P (XEXP (shift_op, 1))
12524 		      && CONST_INT_P (shift_count)
12525 		      && HWI_COMPUTABLE_MODE_P (mode)
12526 		      && (UINTVAL (XEXP (shift_op, 1))
12527 			  == HOST_WIDE_INT_1U
12528 			       << INTVAL (shift_count))))
12529 		{
12530 		  op0
12531 		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12532 		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12533 		  code = (code == NE ? EQ : NE);
12534 		  continue;
12535 		}
12536 	    }
12537 	  break;
12538 
12539 	case ASHIFT:
12540 	  /* If we have (compare (ashift FOO N) (const_int C)) and
12541 	     the high order N bits of FOO (N+1 if an inequality comparison)
12542 	     are known to be zero, we can do this by comparing FOO with C
12543 	     shifted right N bits so long as the low-order N bits of C are
12544 	     zero.  */
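      	  /* For example, (eq (ashift X (const_int 2)) (const_int 12))
      	     becomes (eq X (const_int 3)) when the high two bits of X are
      	     known to be zero.  */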
12545 	  if (CONST_INT_P (XEXP (op0, 1))
12546 	      && INTVAL (XEXP (op0, 1)) >= 0
12547 	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12548 		  < HOST_BITS_PER_WIDE_INT)
12549 	      && (((unsigned HOST_WIDE_INT) const_op
12550 		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12551 		      - 1)) == 0)
12552 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12553 	      && (nonzero_bits (XEXP (op0, 0), mode)
12554 		  & ~(mask >> (INTVAL (XEXP (op0, 1))
12555 			       + ! equality_comparison_p))) == 0)
12556 	    {
12557 	      /* We must perform a logical shift, not an arithmetic one,
12558 		 as we want the top N bits of C to be zero.  */
12559 	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12560 
12561 	      temp >>= INTVAL (XEXP (op0, 1));
12562 	      op1 = gen_int_mode (temp, mode);
12563 	      op0 = XEXP (op0, 0);
12564 	      continue;
12565 	    }
12566 
12567 	  /* If we are doing a sign bit comparison, it means we are testing
12568 	     a particular bit.  Convert it to the appropriate AND.  */
12569 	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12570 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
12571 	    {
12572 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12573 					    (HOST_WIDE_INT_1U
12574 					     << (mode_width - 1
12575 						 - INTVAL (XEXP (op0, 1)))));
12576 	      code = (code == LT ? NE : EQ);
12577 	      continue;
12578 	    }
12579 
12580 	  /* If this is an equality comparison with zero and we are shifting
12581 	     the low bit to the sign bit, we can convert this to an AND of the
12582 	     low-order bit.  */
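      	  /* For example, in SImode (eq (ashift X (const_int 31)) (const_int 0))
      	     becomes (eq (and X (const_int 1)) (const_int 0)).  */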
12583 	  if (const_op == 0 && equality_comparison_p
12584 	      && CONST_INT_P (XEXP (op0, 1))
12585 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12586 	    {
12587 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12588 	      continue;
12589 	    }
12590 	  break;
12591 
12592 	case ASHIFTRT:
12593 	  /* If this is an equality comparison with zero, we can do this
12594 	     as a logical shift, which might be much simpler.  */
12595 	  if (equality_comparison_p && const_op == 0
12596 	      && CONST_INT_P (XEXP (op0, 1)))
12597 	    {
12598 	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12599 					  XEXP (op0, 0),
12600 					  INTVAL (XEXP (op0, 1)));
12601 	      continue;
12602 	    }
12603 
12604 	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12605 	     do the comparison in a narrower mode.  */
12606 	  if (! unsigned_comparison_p
12607 	      && CONST_INT_P (XEXP (op0, 1))
12608 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
12609 	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12610 	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12611 					 MODE_INT, 1)) != BLKmode
12612 	      && (((unsigned HOST_WIDE_INT) const_op
12613 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
12614 		  <= GET_MODE_MASK (tmode)))
12615 	    {
12616 	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12617 	      continue;
12618 	    }
12619 
12620 	  /* Likewise if OP0 is a PLUS of a sign extension with a
12621 	     constant, which is usually represented with the PLUS
12622 	     between the shifts.  */
12623 	  if (! unsigned_comparison_p
12624 	      && CONST_INT_P (XEXP (op0, 1))
12625 	      && GET_CODE (XEXP (op0, 0)) == PLUS
12626 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12627 	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12628 	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12629 	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12630 					 MODE_INT, 1)) != BLKmode
12631 	      && (((unsigned HOST_WIDE_INT) const_op
12632 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
12633 		  <= GET_MODE_MASK (tmode)))
12634 	    {
12635 	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12636 	      rtx add_const = XEXP (XEXP (op0, 0), 1);
12637 	      rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
12638 						   add_const, XEXP (op0, 1));
12639 
12640 	      op0 = simplify_gen_binary (PLUS, tmode,
12641 					 gen_lowpart (tmode, inner),
12642 					 new_const);
12643 	      continue;
12644 	    }
12645 
12646 	  /* FALLTHROUGH */
12647 	case LSHIFTRT:
12648 	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12649 	     the low order N bits of FOO are known to be zero, we can do this
12650 	     by comparing FOO with C shifted left N bits so long as no
12651 	     overflow occurs.  Even if the low order N bits of FOO aren't known
12652 	     to be zero, if the comparison is >= or < we can use the same
12653 	     optimization and for > or <= by setting all the low
12654 	     order N bits in the comparison constant.  */
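      	  /* For example, (geu (lshiftrt X (const_int 4)) (const_int 3))
      	     becomes (geu X (const_int 48)).  */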
12655 	  if (CONST_INT_P (XEXP (op0, 1))
12656 	      && INTVAL (XEXP (op0, 1)) > 0
12657 	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12658 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12659 	      && (((unsigned HOST_WIDE_INT) const_op
12660 		   + (GET_CODE (op0) != LSHIFTRT
12661 		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12662 			 + 1)
12663 		      : 0))
12664 		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12665 	    {
12666 	      unsigned HOST_WIDE_INT low_bits
12667 		= (nonzero_bits (XEXP (op0, 0), mode)
12668 		   & ((HOST_WIDE_INT_1U
12669 		       << INTVAL (XEXP (op0, 1))) - 1));
12670 	      if (low_bits == 0 || !equality_comparison_p)
12671 		{
12672 		  /* If the shift was logical, then we must make the condition
12673 		     unsigned.  */
12674 		  if (GET_CODE (op0) == LSHIFTRT)
12675 		    code = unsigned_condition (code);
12676 
12677 		  const_op = (unsigned HOST_WIDE_INT) const_op
12678 			      << INTVAL (XEXP (op0, 1));
12679 		  if (low_bits != 0
12680 		      && (code == GT || code == GTU
12681 			  || code == LE || code == LEU))
12682 		    const_op
12683 		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12684 		  op1 = GEN_INT (const_op);
12685 		  op0 = XEXP (op0, 0);
12686 		  continue;
12687 		}
12688 	    }
12689 
12690 	  /* If we are using this shift to extract just the sign bit, we
12691 	     can replace this with an LT or GE comparison.  */
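      	  /* For example, in SImode (ne (lshiftrt X (const_int 31))
      	     (const_int 0)) becomes (lt X (const_int 0)).  */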
12692 	  if (const_op == 0
12693 	      && (equality_comparison_p || sign_bit_comparison_p)
12694 	      && CONST_INT_P (XEXP (op0, 1))
12695 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12696 	    {
12697 	      op0 = XEXP (op0, 0);
12698 	      code = (code == NE || code == GT ? LT : GE);
12699 	      continue;
12700 	    }
12701 	  break;
12702 
12703 	default:
12704 	  break;
12705 	}
12706 
12707       break;
12708     }
12709 
12710   /* Now make any compound operations involved in this comparison.  Then,
12711      check for an outermost SUBREG on OP0 that is not doing anything or is
12712      paradoxical.  The latter transformation must only be performed when
12713      it is known that the "extra" bits will be the same in op0 and op1 or
12714      that they don't matter.  There are three cases to consider:
12715 
12716      1. SUBREG_REG (op0) is a register.  In this case the bits are don't
12717      care bits and we can assume they have any convenient value.  So
12718      making the transformation is safe.
12719 
12720      2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12721      In this case the upper bits of op0 are undefined.  We should not make
12722      the simplification in that case as we do not know the contents of
12723      those bits.
12724 
12725      3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12726      In that case we know those bits are zeros or ones.  We must also be
12727      sure that they are the same as the upper bits of op1.
12728 
12729      We can never remove a SUBREG for a non-equality comparison because
12730      the sign bit is in a different place in the underlying object.  */
12731 
12732   rtx_code op0_mco_code = SET;
12733   if (op1 == const0_rtx)
12734     op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12735 
12736   op0 = make_compound_operation (op0, op0_mco_code);
12737   op1 = make_compound_operation (op1, SET);
12738 
12739   if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12740       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12741       && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12742       && (code == NE || code == EQ))
12743     {
12744       if (paradoxical_subreg_p (op0))
12745 	{
12746 	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
12747 	     implemented.  */
12748 	  if (REG_P (SUBREG_REG (op0)))
12749 	    {
12750 	      op0 = SUBREG_REG (op0);
12751 	      op1 = gen_lowpart (GET_MODE (op0), op1);
12752 	    }
12753 	}
12754       else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12755 		<= HOST_BITS_PER_WIDE_INT)
12756 	       && (nonzero_bits (SUBREG_REG (op0),
12757 				 GET_MODE (SUBREG_REG (op0)))
12758 		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12759 	{
12760 	  tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12761 
12762 	  if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12763 	       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12764 	    op0 = SUBREG_REG (op0), op1 = tem;
12765 	}
12766     }
12767 
12768   /* We now do the opposite procedure: Some machines don't have compare
12769      insns in all modes.  If OP0's mode is an integer mode smaller than a
12770      word and we can't do a compare in that mode, see if there is a larger
12771      mode for which we can do the compare.  There are a number of cases in
12772      which we can use the wider mode.  */
12773 
12774   mode = GET_MODE (op0);
12775   if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
12776       && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12777       && ! have_insn_for (COMPARE, mode))
12778     for (tmode = GET_MODE_WIDER_MODE (mode);
12779 	 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
12780 	 tmode = GET_MODE_WIDER_MODE (tmode))
12781       if (have_insn_for (COMPARE, tmode))
12782 	{
12783 	  int zero_extended;
12784 
12785 	  /* If this is a test for negative, we can make an explicit
12786 	     test of the sign bit.  Test this first so we can use
12787 	     a paradoxical subreg to extend OP0.  */
12788 
12789 	  if (op1 == const0_rtx && (code == LT || code == GE)
12790 	      && HWI_COMPUTABLE_MODE_P (mode))
12791 	    {
12792 	      unsigned HOST_WIDE_INT sign
12793 		= HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12794 	      op0 = simplify_gen_binary (AND, tmode,
12795 					 gen_lowpart (tmode, op0),
12796 					 gen_int_mode (sign, tmode));
12797 	      code = (code == LT) ? NE : EQ;
12798 	      break;
12799 	    }
12800 
12801 	  /* If the only nonzero bits in OP0 and OP1 are those in the
12802 	     narrower mode and this is an equality or unsigned comparison,
12803 	     we can use the wider mode.  Similarly for sign-extended
12804 	     values, in which case it is true for all comparisons.  */
12805 	  zero_extended = ((code == EQ || code == NE
12806 			    || code == GEU || code == GTU
12807 			    || code == LEU || code == LTU)
12808 			   && (nonzero_bits (op0, tmode)
12809 			       & ~GET_MODE_MASK (mode)) == 0
12810 			   && ((CONST_INT_P (op1)
12811 				|| (nonzero_bits (op1, tmode)
12812 				    & ~GET_MODE_MASK (mode)) == 0)));
12813 
12814 	  if (zero_extended
12815 	      || ((num_sign_bit_copies (op0, tmode)
12816 		   > (unsigned int) (GET_MODE_PRECISION (tmode)
12817 				     - GET_MODE_PRECISION (mode)))
12818 		  && (num_sign_bit_copies (op1, tmode)
12819 		      > (unsigned int) (GET_MODE_PRECISION (tmode)
12820 					- GET_MODE_PRECISION (mode)))))
12821 	    {
12822 	      /* If OP0 is an AND and we don't have an AND in MODE either,
12823 		 make a new AND in the proper mode.  */
12824 	      if (GET_CODE (op0) == AND
12825 		  && !have_insn_for (AND, mode))
12826 		op0 = simplify_gen_binary (AND, tmode,
12827 					   gen_lowpart (tmode,
12828 							XEXP (op0, 0)),
12829 					   gen_lowpart (tmode,
12830 							XEXP (op0, 1)));
12831 	      else
12832 		{
12833 		  if (zero_extended)
12834 		    {
12835 		      op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
12836 		      op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
12837 		    }
12838 		  else
12839 		    {
12840 		      op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
12841 		      op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
12842 		    }
12843 		  break;
12844 		}
12845 	    }
12846 	}
12847 
12848   /* We may have changed the comparison operands.  Re-canonicalize.  */
12849   if (swap_commutative_operands_p (op0, op1))
12850     {
12851       std::swap (op0, op1);
12852       code = swap_condition (code);
12853     }
12854 
12855   /* If this machine only supports a subset of valid comparisons, see if we
12856      can convert an unsupported one into a supported one.  */
12857   target_canonicalize_comparison (&code, &op0, &op1, 0);
12858 
12859   *pop0 = op0;
12860   *pop1 = op1;
12861 
12862   return code;
12863 }
12864 
12865 /* Utility function for record_value_for_reg.  Count number of
12866    rtxs in X.  */
12867 static int
12868 count_rtxs (rtx x)
12869 {
12870   enum rtx_code code = GET_CODE (x);
12871   const char *fmt;
12872   int i, j, ret = 1;
12873 
12874   if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12875       || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12876     {
12877       rtx x0 = XEXP (x, 0);
12878       rtx x1 = XEXP (x, 1);
12879 
12880       if (x0 == x1)
12881 	return 1 + 2 * count_rtxs (x0);
12882 
12883       if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12884 	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12885 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12886 	return 2 + 2 * count_rtxs (x0)
12887 	       + count_rtxs (x == XEXP (x1, 0)
12888 			     ? XEXP (x1, 1) : XEXP (x1, 0));
12889 
12890       if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12891 	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12892 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12893 	return 2 + 2 * count_rtxs (x1)
12894 	       + count_rtxs (x == XEXP (x0, 0)
12895 			     ? XEXP (x0, 1) : XEXP (x0, 0));
12896     }
12897 
12898   fmt = GET_RTX_FORMAT (code);
12899   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12900     if (fmt[i] == 'e')
12901       ret += count_rtxs (XEXP (x, i));
12902     else if (fmt[i] == 'E')
12903       for (j = 0; j < XVECLEN (x, i); j++)
12904 	ret += count_rtxs (XVECEXP (x, i, j));
12905 
12906   return ret;
12907 }
12908 
12909 /* Utility function for the following routine.  Called when X is part of a value
12910    being stored into last_set_value.  Sets last_set_table_tick
12911    for each register mentioned.  Similar to mention_regs in cse.c.  */
12912 
12913 static void
12914 update_table_tick (rtx x)
12915 {
12916   enum rtx_code code = GET_CODE (x);
12917   const char *fmt = GET_RTX_FORMAT (code);
12918   int i, j;
12919 
12920   if (code == REG)
12921     {
12922       unsigned int regno = REGNO (x);
12923       unsigned int endregno = END_REGNO (x);
12924       unsigned int r;
12925 
12926       for (r = regno; r < endregno; r++)
12927 	{
12928 	  reg_stat_type *rsp = &reg_stat[r];
12929 	  rsp->last_set_table_tick = label_tick;
12930 	}
12931 
12932       return;
12933     }
12934 
12935   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12936     if (fmt[i] == 'e')
12937       {
12938 	/* Check for identical subexpressions.  If x contains
12939 	   identical subexpressions we only have to traverse one of
12940 	   them.  */
12941 	if (i == 0 && ARITHMETIC_P (x))
12942 	  {
12943 	    /* Note that at this point x1 has already been
12944 	       processed.  */
12945 	    rtx x0 = XEXP (x, 0);
12946 	    rtx x1 = XEXP (x, 1);
12947 
12948 	    /* If x0 and x1 are identical then there is no need to
12949 	       process x0.  */
12950 	    if (x0 == x1)
12951 	      break;
12952 
12953 	    /* If x0 is identical to a subexpression of x1 then while
12954 	       processing x1, x0 has already been processed.  Thus we
12955 	       are done with x.  */
12956 	    if (ARITHMETIC_P (x1)
12957 		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12958 	      break;
12959 
12960 	    /* If x1 is identical to a subexpression of x0 then we
12961 	       still have to process the rest of x0.  */
12962 	    if (ARITHMETIC_P (x0)
12963 		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12964 	      {
12965 		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12966 		break;
12967 	      }
12968 	  }
12969 
12970 	update_table_tick (XEXP (x, i));
12971       }
12972     else if (fmt[i] == 'E')
12973       for (j = 0; j < XVECLEN (x, i); j++)
12974 	update_table_tick (XVECEXP (x, i, j));
12975 }
12976 
12977 /* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
12978    are saying that the register is clobbered and we no longer know its
12979    value.  If INSN is zero, don't update reg_stat[].last_set; this is
12980    only permitted with VALUE also zero and is used to invalidate the
12981    register.  */
12982 
12983 static void
12984 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
12985 {
12986   unsigned int regno = REGNO (reg);
12987   unsigned int endregno = END_REGNO (reg);
12988   unsigned int i;
12989   reg_stat_type *rsp;
12990 
12991   /* If VALUE contains REG and we have a previous value for REG, substitute
12992      the previous value.  */
12993   if (value && insn && reg_overlap_mentioned_p (reg, value))
12994     {
12995       rtx tem;
12996 
12997       /* Set things up so get_last_value is allowed to see anything set up to
12998 	 our insn.  */
12999       subst_low_luid = DF_INSN_LUID (insn);
13000       tem = get_last_value (reg);
13001 
13002       /* If TEM is simply a binary operation with two CLOBBERs as operands,
13003 	 it isn't going to be useful and will take a lot of time to process,
13004 	 so just use the CLOBBER.  */
13005 
13006       if (tem)
13007 	{
13008 	  if (ARITHMETIC_P (tem)
13009 	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
13010 	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13011 	    tem = XEXP (tem, 0);
13012 	  else if (count_occurrences (value, reg, 1) >= 2)
13013 	    {
13014 	      /* If there are two or more occurrences of REG in VALUE,
13015 		 prevent the value from growing too much.  */
13016 	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13017 		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13018 	    }
13019 
13020 	  value = replace_rtx (copy_rtx (value), reg, tem);
13021 	}
13022     }
13023 
13024   /* For each register modified, show we don't know its value, that
13025      we don't know about its bitwise content, that its value has been
13026      updated, and that we don't know the location of the death of the
13027      register.  */
13028   for (i = regno; i < endregno; i++)
13029     {
13030       rsp = &reg_stat[i];
13031 
13032       if (insn)
13033 	rsp->last_set = insn;
13034 
13035       rsp->last_set_value = 0;
13036       rsp->last_set_mode = VOIDmode;
13037       rsp->last_set_nonzero_bits = 0;
13038       rsp->last_set_sign_bit_copies = 0;
13039       rsp->last_death = 0;
13040       rsp->truncated_to_mode = VOIDmode;
13041     }
13042 
13043   /* Mark registers that are being referenced in this value.  */
13044   if (value)
13045     update_table_tick (value);
13046 
13047   /* Now update the status of each register being set.
13048      If someone is using this register in this block, set this register
13049      to invalid since we will get confused between the two lives in this
13050      basic block.  This makes using this register always invalid.  In cse, we
13051      scan the table to invalidate all entries using this register, but this
13052      is too much work for us.  */
13053 
13054   for (i = regno; i < endregno; i++)
13055     {
13056       rsp = &reg_stat[i];
13057       rsp->last_set_label = label_tick;
13058       if (!insn
13059 	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13060 	rsp->last_set_invalid = 1;
13061       else
13062 	rsp->last_set_invalid = 0;
13063     }
13064 
13065   /* The value being assigned might refer to X (like in "x++;").  In that
13066      case, we must replace it with (clobber (const_int 0)) to prevent
13067      infinite loops.  */
13068   rsp = &reg_stat[regno];
13069   if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13070     {
13071       value = copy_rtx (value);
13072       if (!get_last_value_validate (&value, insn, label_tick, 1))
13073 	value = 0;
13074     }
13075 
13076   /* For the main register being modified, update the value, the mode, the
13077      nonzero bits, and the number of sign bit copies.  */
13078 
13079   rsp->last_set_value = value;
13080 
13081   if (value)
13082     {
13083       machine_mode mode = GET_MODE (reg);
13084       subst_low_luid = DF_INSN_LUID (insn);
13085       rsp->last_set_mode = mode;
13086       if (GET_MODE_CLASS (mode) == MODE_INT
13087 	  && HWI_COMPUTABLE_MODE_P (mode))
13088 	mode = nonzero_bits_mode;
13089       rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13090       rsp->last_set_sign_bit_copies
13091 	= num_sign_bit_copies (value, GET_MODE (reg));
13092     }
13093 }
13094 
13095 /* Called via note_stores from record_dead_and_set_regs to handle one
13096    SET or CLOBBER in an insn.  DATA is the instruction in which the
13097    set is occurring.  */
13098 
13099 static void
13100 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13101 {
13102   rtx_insn *record_dead_insn = (rtx_insn *) data;
13103 
13104   if (GET_CODE (dest) == SUBREG)
13105     dest = SUBREG_REG (dest);
13106 
13107   if (!record_dead_insn)
13108     {
13109       if (REG_P (dest))
13110 	record_value_for_reg (dest, NULL, NULL_RTX);
13111       return;
13112     }
13113 
13114   if (REG_P (dest))
13115     {
13116       /* If we are setting the whole register, we know its value.  Otherwise
13117 	 show that we don't know the value.  We can handle a SUBREG if it's
13118 	 the low part, but we must be careful with paradoxical SUBREGs on
13119 	 RISC architectures because we cannot strip e.g. an extension around
13120 	 a load and record the naked load since the RTL middle-end considers
13121 	 that the upper bits are defined according to LOAD_EXTEND_OP.  */
13122       if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13123 	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13124       else if (GET_CODE (setter) == SET
13125 	       && GET_CODE (SET_DEST (setter)) == SUBREG
13126 	       && SUBREG_REG (SET_DEST (setter)) == dest
13127 	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13128 	       && subreg_lowpart_p (SET_DEST (setter)))
13129 	record_value_for_reg (dest, record_dead_insn,
13130 			      WORD_REGISTER_OPERATIONS
13131 			      && word_register_operation_p (SET_SRC (setter))
13132 			      && paradoxical_subreg_p (SET_DEST (setter))
13133 			      ? SET_SRC (setter)
13134 			      : gen_lowpart (GET_MODE (dest),
13135 					     SET_SRC (setter)));
13136       else
13137 	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13138     }
13139   else if (MEM_P (dest)
13140 	   /* Ignore pushes, they clobber nothing.  */
13141 	   && ! push_operand (dest, GET_MODE (dest)))
13142     mem_last_set = DF_INSN_LUID (record_dead_insn);
13143 }
13144 
13145 /* Update the records of when each REG was most recently set or killed
13146    for the things done by INSN.  This is the last thing done in processing
13147    INSN in the combiner loop.
13148 
13149    We update reg_stat[], in particular fields last_set, last_set_value,
13150    last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13151    last_death, and also the similar information mem_last_set (which insn
13152    most recently modified memory) and last_call_luid (which insn was the
13153    most recent subroutine call).  */
13154 
13155 static void
13156 record_dead_and_set_regs (rtx_insn *insn)
13157 {
13158   rtx link;
13159   unsigned int i;
13160 
13161   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13162     {
13163       if (REG_NOTE_KIND (link) == REG_DEAD
13164 	  && REG_P (XEXP (link, 0)))
13165 	{
13166 	  unsigned int regno = REGNO (XEXP (link, 0));
13167 	  unsigned int endregno = END_REGNO (XEXP (link, 0));
13168 
13169 	  for (i = regno; i < endregno; i++)
13170 	    {
13171 	      reg_stat_type *rsp;
13172 
13173 	      rsp = &reg_stat[i];
13174 	      rsp->last_death = insn;
13175 	    }
13176 	}
13177       else if (REG_NOTE_KIND (link) == REG_INC)
13178 	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13179     }
13180 
13181   if (CALL_P (insn))
13182     {
13183       hard_reg_set_iterator hrsi;
13184       EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13185 	{
13186 	  reg_stat_type *rsp;
13187 
13188 	  rsp = &reg_stat[i];
13189 	  rsp->last_set_invalid = 1;
13190 	  rsp->last_set = insn;
13191 	  rsp->last_set_value = 0;
13192 	  rsp->last_set_mode = VOIDmode;
13193 	  rsp->last_set_nonzero_bits = 0;
13194 	  rsp->last_set_sign_bit_copies = 0;
13195 	  rsp->last_death = 0;
13196 	  rsp->truncated_to_mode = VOIDmode;
13197 	}
13198 
13199       last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13200 
13201       /* We can't combine into a call pattern.  Remember, though, that
13202 	 the return value register is set at this LUID.  We could
13203 	 still replace a register with the return value from the
13204 	 wrong subroutine call!  */
13205       note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13206     }
13207   else
13208     note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13209 }
13210 
13211 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13212    register present in the SUBREG, so for each such SUBREG go back and
13213    adjust nonzero and sign bit information of the registers that are
13214    known to have some zero/sign bits set.
13215 
13216    This is needed because when combine blows the SUBREGs away, the
13217    information on zero/sign bits is lost and further combines can be
13218    missed because of that.  */
13219 
13220 static void
13221 record_promoted_value (rtx_insn *insn, rtx subreg)
13222 {
13223   struct insn_link *links;
13224   rtx set;
13225   unsigned int regno = REGNO (SUBREG_REG (subreg));
13226   machine_mode mode = GET_MODE (subreg);
13227 
13228   if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
13229     return;
13230 
13231   for (links = LOG_LINKS (insn); links;)
13232     {
13233       reg_stat_type *rsp;
13234 
13235       insn = links->insn;
13236       set = single_set (insn);
13237 
13238       if (! set || !REG_P (SET_DEST (set))
13239 	  || REGNO (SET_DEST (set)) != regno
13240 	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13241 	{
13242 	  links = links->next;
13243 	  continue;
13244 	}
13245 
13246       rsp = &reg_stat[regno];
13247       if (rsp->last_set == insn)
13248 	{
13249 	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13250 	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13251 	}
13252 
13253       if (REG_P (SET_SRC (set)))
13254 	{
13255 	  regno = REGNO (SET_SRC (set));
13256 	  links = LOG_LINKS (insn);
13257 	}
13258       else
13259 	break;
13260     }
13261 }
13262 
13263 /* Check if X, a register, is known to contain a value already
13264    truncated to MODE.  In this case we can use a subreg to refer to
13265    the truncated value even though in the generic case we would need
13266    an explicit truncation.  */
13267 
13268 static bool
13269 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13270 {
13271   reg_stat_type *rsp = &reg_stat[REGNO (x)];
13272   machine_mode truncated = rsp->truncated_to_mode;
13273 
13274   if (truncated == 0
13275       || rsp->truncation_label < label_tick_ebb_start)
13276     return false;
13277   if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
13278     return true;
13279   if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13280     return true;
13281   return false;
13282 }
13283 
13284 /* If X is a hard reg or a subreg, record the mode that the register is
13285    accessed in.  For non-TRULY_NOOP_TRUNCATION targets we might be able
13286    to turn a truncate into a subreg using this information.  Return true
13287    if traversing X is complete.  */
13288 
13289 static bool
13290 record_truncated_value (rtx x)
13291 {
13292   machine_mode truncated_mode;
13293   reg_stat_type *rsp;
13294 
13295   if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13296     {
13297       machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13298       truncated_mode = GET_MODE (x);
13299 
13300       if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
13301 	return true;
13302 
13303       if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13304 	return true;
13305 
13306       x = SUBREG_REG (x);
13307     }
13308   /* ??? For hard-regs we now record everything.  We might be able to
13309      optimize this using last_set_mode.  */
13310   else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13311     truncated_mode = GET_MODE (x);
13312   else
13313     return false;
13314 
13315   rsp = &reg_stat[REGNO (x)];
13316   if (rsp->truncated_to_mode == 0
13317       || rsp->truncation_label < label_tick_ebb_start
13318       || (GET_MODE_SIZE (truncated_mode)
13319 	  < GET_MODE_SIZE (rsp->truncated_to_mode)))
13320     {
13321       rsp->truncated_to_mode = truncated_mode;
13322       rsp->truncation_label = label_tick;
13323     }
13324 
13325   return true;
13326 }
13327 
13328 /* Callback for note_uses.  Find hardregs and subregs of pseudos and
13329    the modes they are used in.  This can help turn TRUNCATEs into
13330    SUBREGs.  */
13331 
13332 static void
13333 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13334 {
13335   subrtx_var_iterator::array_type array;
13336   FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13337     if (record_truncated_value (*iter))
13338       iter.skip_subrtxes ();
13339 }
13340 
13341 /* Scan X for promoted SUBREGs.  For each one found,
13342    note what it implies to the registers used in it.  */
13343 
13344 static void
13345 check_promoted_subreg (rtx_insn *insn, rtx x)
13346 {
13347   if (GET_CODE (x) == SUBREG
13348       && SUBREG_PROMOTED_VAR_P (x)
13349       && REG_P (SUBREG_REG (x)))
13350     record_promoted_value (insn, x);
13351   else
13352     {
13353       const char *format = GET_RTX_FORMAT (GET_CODE (x));
13354       int i, j;
13355 
13356       for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13357 	switch (format[i])
13358 	  {
13359 	  case 'e':
13360 	    check_promoted_subreg (insn, XEXP (x, i));
13361 	    break;
13362 	  case 'V':
13363 	  case 'E':
13364 	    if (XVEC (x, i) != 0)
13365 	      for (j = 0; j < XVECLEN (x, i); j++)
13366 		check_promoted_subreg (insn, XVECEXP (x, i, j));
13367 	    break;
13368 	  }
13369     }
13370 }
13371 
13372 /* Verify that all the registers and memory references mentioned in *LOC are
13373    still valid.  *LOC was part of a value set in INSN when label_tick was
13374    equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
13375    the invalid references with (clobber (const_int 0)) and return 1.  This
13376    replacement is useful because we often can get useful information about
13377    the form of a value (e.g., if it was produced by a shift that always
13378    produces -1 or 0) even though we don't know exactly what registers it
13379    was produced from.  */
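
/* As a hypothetical illustration of the REPLACE case: if the recorded
   value is (ashiftrt:SI (reg:SI 100) (const_int 31)) and reg 100 is no
   longer valid, the reference is replaced so that the value becomes
   (ashiftrt:SI (clobber:SI (const_int 0)) (const_int 31)), which still
   shows that the result can only be -1 or 0.  */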
13380 
13381 static int
13382 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13383 {
13384   rtx x = *loc;
13385   const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13386   int len = GET_RTX_LENGTH (GET_CODE (x));
13387   int i, j;
13388 
13389   if (REG_P (x))
13390     {
13391       unsigned int regno = REGNO (x);
13392       unsigned int endregno = END_REGNO (x);
13393       unsigned int j;
13394 
13395       for (j = regno; j < endregno; j++)
13396 	{
13397 	  reg_stat_type *rsp = &reg_stat[j];
13398 	  if (rsp->last_set_invalid
13399 	      /* If this is a pseudo-register that was only set once and not
13400 		 live at the beginning of the function, it is always valid.  */
13401 	      || (! (regno >= FIRST_PSEUDO_REGISTER
13402 		     && regno < reg_n_sets_max
13403 		     && REG_N_SETS (regno) == 1
13404 		     && (!REGNO_REG_SET_P
13405 			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13406 			  regno)))
13407 		  && rsp->last_set_label > tick))
13408 	  {
13409 	    if (replace)
13410 	      *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13411 	    return replace;
13412 	  }
13413 	}
13414 
13415       return 1;
13416     }
13417   /* If this is a memory reference, make sure that there were no stores after
13418      it that might have clobbered the value.  We don't have alias info, so we
13419      assume any store invalidates it.  Moreover, we only have local UIDs, so
13420      we also assume that there were stores in the intervening basic blocks.  */
13421   else if (MEM_P (x) && !MEM_READONLY_P (x)
13422 	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13423     {
13424       if (replace)
13425 	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13426       return replace;
13427     }
13428 
13429   for (i = 0; i < len; i++)
13430     {
13431       if (fmt[i] == 'e')
13432 	{
13433 	  /* Check for identical subexpressions.  If x contains
13434 	     identical subexpressions we only have to traverse one of
13435 	     them.  */
13436 	  if (i == 1 && ARITHMETIC_P (x))
13437 	    {
13438 	      /* Note that at this point x0 has already been checked
13439 		 and found valid.  */
13440 	      rtx x0 = XEXP (x, 0);
13441 	      rtx x1 = XEXP (x, 1);
13442 
13443 	      /* If x0 and x1 are identical then x is also valid.  */
13444 	      if (x0 == x1)
13445 		return 1;
13446 
13447 	      /* If x1 is identical to a subexpression of x0 then
13448 		 while checking x0, x1 has already been checked.  Thus
13449 		 it is valid and so is x.  */
13450 	      if (ARITHMETIC_P (x0)
13451 		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13452 		return 1;
13453 
13454 	      /* If x0 is identical to a subexpression of x1 then x is
13455 		 valid iff the rest of x1 is valid.  */
13456 	      if (ARITHMETIC_P (x1)
13457 		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13458 		return
13459 		  get_last_value_validate (&XEXP (x1,
13460 						  x0 == XEXP (x1, 0) ? 1 : 0),
13461 					   insn, tick, replace);
13462 	    }
13463 
13464 	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
13465 				       replace) == 0)
13466 	    return 0;
13467 	}
13468       else if (fmt[i] == 'E')
13469 	for (j = 0; j < XVECLEN (x, i); j++)
13470 	  if (get_last_value_validate (&XVECEXP (x, i, j),
13471 				       insn, tick, replace) == 0)
13472 	    return 0;
13473     }
13474 
13475   /* If we haven't found a reason for it to be invalid, it is valid.  */
13476   return 1;
13477 }
13478 
13479 /* Get the last value assigned to X, if known.  Some registers
13480    in the value may be replaced with (clobber (const_int 0)) if their value
13481    is no longer known reliably.  */
13482 
13483 static rtx
13484 get_last_value (const_rtx x)
13485 {
13486   unsigned int regno;
13487   rtx value;
13488   reg_stat_type *rsp;
13489 
13490   /* If this is a non-paradoxical SUBREG, get the value of its operand and
13491      then convert it to the desired mode.  If this is a paradoxical SUBREG,
13492      we cannot predict what values the "extra" bits might have.  */
13493   if (GET_CODE (x) == SUBREG
13494       && subreg_lowpart_p (x)
13495       && !paradoxical_subreg_p (x)
13496       && (value = get_last_value (SUBREG_REG (x))) != 0)
13497     return gen_lowpart (GET_MODE (x), value);
13498 
13499   if (!REG_P (x))
13500     return 0;
13501 
13502   regno = REGNO (x);
13503   rsp = &reg_stat[regno];
13504   value = rsp->last_set_value;
13505 
13506   /* If we don't have a value, or if it isn't for this basic block and
13507      it's either a hard register, set more than once, or it's live
13508      at the beginning of the function, return 0.
13509 
13510      Because if it's not live at the beginning of the function then the reg
13511      is always set before being used (is never used without being set).
13512      And, if it's set only once, and it's always set before use, then all
13513      uses must have the same last value, even if it's not from this basic
13514      block.  */
13515 
13516   if (value == 0
13517       || (rsp->last_set_label < label_tick_ebb_start
13518 	  && (regno < FIRST_PSEUDO_REGISTER
13519 	      || regno >= reg_n_sets_max
13520 	      || REG_N_SETS (regno) != 1
13521 	      || REGNO_REG_SET_P
13522 		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13523     return 0;
13524 
13525   /* If the value was set in a later insn than the ones we are processing,
13526      we can't use it even if the register was only set once.  */
13527   if (rsp->last_set_label == label_tick
13528       && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13529     return 0;
13530 
13531   /* If the value was recorded in a narrower mode than the one we are asked
13532      for now, we cannot use it.  */
13533   if (GET_MODE_PRECISION (rsp->last_set_mode)
13534       < GET_MODE_PRECISION (GET_MODE (x)))
13535     return 0;
13536 
13537   /* If the value has all its registers valid, return it.  */
13538   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13539     return value;
13540 
13541   /* Otherwise, make a copy and replace any invalid register with
13542      (clobber (const_int 0)).  If that fails for some reason, return 0.  */
13543 
13544   value = copy_rtx (value);
13545   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13546     return value;
13547 
13548   return 0;
13549 }
13550 
13551 /* Return nonzero if expression X refers to a REG or to memory
13552    that is set in an instruction more recent than FROM_LUID.  */
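
/* For example, if X mentions (reg:SI 100) and that register was last set
   in this block by an insn with a luid greater than FROM_LUID, then moving
   X back to luid FROM_LUID would let it see the register's old value, so
   such a combination must be avoided (the register number is illustrative).  */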
13553 
13554 static int
13555 use_crosses_set_p (const_rtx x, int from_luid)
13556 {
13557   const char *fmt;
13558   int i;
13559   enum rtx_code code = GET_CODE (x);
13560 
13561   if (code == REG)
13562     {
13563       unsigned int regno = REGNO (x);
13564       unsigned endreg = END_REGNO (x);
13565 
13566 #ifdef PUSH_ROUNDING
13567       /* Don't allow uses of the stack pointer to be moved,
13568 	 because we don't know whether the move crosses a push insn.  */
13569       if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13570 	return 1;
13571 #endif
13572       for (; regno < endreg; regno++)
13573 	{
13574 	  reg_stat_type *rsp = &reg_stat[regno];
13575 	  if (rsp->last_set
13576 	      && rsp->last_set_label == label_tick
13577 	      && DF_INSN_LUID (rsp->last_set) > from_luid)
13578 	    return 1;
13579 	}
13580       return 0;
13581     }
13582 
13583   if (code == MEM && mem_last_set > from_luid)
13584     return 1;
13585 
13586   fmt = GET_RTX_FORMAT (code);
13587 
13588   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13589     {
13590       if (fmt[i] == 'E')
13591 	{
13592 	  int j;
13593 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13594 	    if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13595 	      return 1;
13596 	}
13597       else if (fmt[i] == 'e'
13598 	       && use_crosses_set_p (XEXP (x, i), from_luid))
13599 	return 1;
13600     }
13601   return 0;
13602 }
13603 
13604 /* Define three variables used for communication between the following
13605    routines.  */
13606 
13607 static unsigned int reg_dead_regno, reg_dead_endregno;
13608 static int reg_dead_flag;
13609 
13610 /* Function called via note_stores from reg_dead_at_p.
13611 
13612    If DEST is within [reg_dead_regno, reg_dead_endregno), set
13613    reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
13614 
13615 static void
13616 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13617 {
13618   unsigned int regno, endregno;
13619 
13620   if (!REG_P (dest))
13621     return;
13622 
13623   regno = REGNO (dest);
13624   endregno = END_REGNO (dest);
13625   if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13626     reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13627 }
13628 
13629 /* Return nonzero if REG is known to be dead at INSN.
13630 
13631    We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
13632    referencing REG, it is dead.  If we hit a SET referencing REG, it is
13633    live.  Otherwise, see if it is live or dead at the start of the basic
13634    block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
13635    must be assumed to be always live.  */
13636 
13637 static int
13638 reg_dead_at_p (rtx reg, rtx_insn *insn)
13639 {
13640   basic_block block;
13641   unsigned int i;
13642 
13643   /* Set variables for reg_dead_at_p_1.  */
13644   reg_dead_regno = REGNO (reg);
13645   reg_dead_endregno = END_REGNO (reg);
13646 
13647   reg_dead_flag = 0;
13648 
13649   /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
13650      we allow the machine description to decide whether use-and-clobber
13651      patterns are OK.  */
13652   if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13653     {
13654       for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13655 	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13656 	  return 0;
13657     }
13658 
13659   /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13660      beginning of basic block.  */
13661   block = BLOCK_FOR_INSN (insn);
13662   for (;;)
13663     {
13664       if (INSN_P (insn))
13665         {
13666 	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13667 	    return 1;
13668 
13669 	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13670 	  if (reg_dead_flag)
13671 	    return reg_dead_flag == 1 ? 1 : 0;
13672 
13673 	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13674 	    return 1;
13675         }
13676 
13677       if (insn == BB_HEAD (block))
13678 	break;
13679 
13680       insn = PREV_INSN (insn);
13681     }
13682 
13683   /* Look at live-in sets for the basic block that we were in.  */
13684   for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13685     if (REGNO_REG_SET_P (df_get_live_in (block), i))
13686       return 0;
13687 
13688   return 1;
13689 }
13690 
13691 /* Note hard registers in X that are used.  */
13692 
13693 static void
13694 mark_used_regs_combine (rtx x)
13695 {
13696   RTX_CODE code = GET_CODE (x);
13697   unsigned int regno;
13698   int i;
13699 
13700   switch (code)
13701     {
13702     case LABEL_REF:
13703     case SYMBOL_REF:
13704     case CONST:
13705     CASE_CONST_ANY:
13706     case PC:
13707     case ADDR_VEC:
13708     case ADDR_DIFF_VEC:
13709     case ASM_INPUT:
13710     /* CC0 must die in the insn after it is set, so we don't need to take
13711        special note of it here.  */
13712     case CC0:
13713       return;
13714 
13715     case CLOBBER:
13716       /* If we are clobbering a MEM, mark any hard registers inside the
13717 	 address as used.  */
13718       if (MEM_P (XEXP (x, 0)))
13719 	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13720       return;
13721 
13722     case REG:
13723       regno = REGNO (x);
13724       /* A hard reg in a wide mode may really be multiple registers.
13725 	 If so, mark all of them just like the first.  */
13726       if (regno < FIRST_PSEUDO_REGISTER)
13727 	{
13728 	  /* None of this applies to the stack, frame or arg pointers.  */
13729 	  if (regno == STACK_POINTER_REGNUM
13730 	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13731 		  && regno == HARD_FRAME_POINTER_REGNUM)
13732 	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13733 		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13734 	      || regno == FRAME_POINTER_REGNUM)
13735 	    return;
13736 
13737 	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13738 	}
13739       return;
13740 
13741     case SET:
13742       {
13743 	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13744 	   the address.  */
13745 	rtx testreg = SET_DEST (x);
13746 
13747 	while (GET_CODE (testreg) == SUBREG
13748 	       || GET_CODE (testreg) == ZERO_EXTRACT
13749 	       || GET_CODE (testreg) == STRICT_LOW_PART)
13750 	  testreg = XEXP (testreg, 0);
13751 
13752 	if (MEM_P (testreg))
13753 	  mark_used_regs_combine (XEXP (testreg, 0));
13754 
13755 	mark_used_regs_combine (SET_SRC (x));
13756       }
13757       return;
13758 
13759     default:
13760       break;
13761     }
13762 
13763   /* Recursively scan the operands of this expression.  */
13764 
13765   {
13766     const char *fmt = GET_RTX_FORMAT (code);
13767 
13768     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13769       {
13770 	if (fmt[i] == 'e')
13771 	  mark_used_regs_combine (XEXP (x, i));
13772 	else if (fmt[i] == 'E')
13773 	  {
13774 	    int j;
13775 
13776 	    for (j = 0; j < XVECLEN (x, i); j++)
13777 	      mark_used_regs_combine (XVECEXP (x, i, j));
13778 	  }
13779       }
13780   }
13781 }
13782 
13783 /* Remove register number REGNO from the dead registers list of INSN.
13784 
13785    Return the note used to record the death, if there was one.  */
13786 
13787 rtx
13788 remove_death (unsigned int regno, rtx_insn *insn)
13789 {
13790   rtx note = find_regno_note (insn, REG_DEAD, regno);
13791 
13792   if (note)
13793     remove_note (insn, note);
13794 
13795   return note;
13796 }
13797 
13798 /* For each register (hardware or pseudo) used within expression X, if its
13799    death is in an instruction with luid between FROM_LUID (inclusive) and
13800    TO_INSN (exclusive), put a REG_DEAD note for that register in the
13801    list headed by PNOTES.
13802 
13803    That said, don't move registers killed by maybe_kill_insn.
13804 
13805    This is done when X is being merged by combination into TO_INSN.  These
13806    notes will then be distributed as needed.  */
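
/* As a hypothetical example: if X still uses (reg:SI 100) and that
   register's recorded death lies in an insn between FROM_LUID and TO_INSN,
   the REG_DEAD note is removed from that insn and queued on PNOTES, so
   that distribute_notes can later attach it to whichever insn now holds
   the last use.  */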
13807 
13808 static void
13809 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13810 	     rtx *pnotes)
13811 {
13812   const char *fmt;
13813   int len, i;
13814   enum rtx_code code = GET_CODE (x);
13815 
13816   if (code == REG)
13817     {
13818       unsigned int regno = REGNO (x);
13819       rtx_insn *where_dead = reg_stat[regno].last_death;
13820 
13821       /* Don't move the register if it gets killed in between from and to.  */
13822       if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13823 	  && ! reg_referenced_p (x, maybe_kill_insn))
13824 	return;
13825 
13826       if (where_dead
13827 	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13828 	  && DF_INSN_LUID (where_dead) >= from_luid
13829 	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13830 	{
13831 	  rtx note = remove_death (regno, where_dead);
13832 
13833 	  /* It is possible for the call above to return 0.  This can occur
13834 	     when last_death points to I2 or I1 that we combined with.
13835 	     In that case make a new note.
13836 
13837 	     We must also check for the case where X is a hard register
13838 	     and NOTE is a death note for a range of hard registers
13839 	     including X.  In that case, we must put REG_DEAD notes for
13840 	     the remaining registers in place of NOTE.  */
13841 
13842 	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13843 	      && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13844 		  > GET_MODE_SIZE (GET_MODE (x))))
13845 	    {
13846 	      unsigned int deadregno = REGNO (XEXP (note, 0));
13847 	      unsigned int deadend = END_REGNO (XEXP (note, 0));
13848 	      unsigned int ourend = END_REGNO (x);
13849 	      unsigned int i;
13850 
13851 	      for (i = deadregno; i < deadend; i++)
13852 		if (i < regno || i >= ourend)
13853 		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13854 	    }
13855 
13856 	  /* If we didn't find any note, or if we found a REG_DEAD note that
13857 	     covers only part of the given reg, and we have a multi-reg hard
13858 	     register, then to be safe we must check for REG_DEAD notes
13859 	     for each register other than the first.  They could have
13860 	     their own REG_DEAD notes lying around.  */
13861 	  else if ((note == 0
13862 		    || (note != 0
13863 			&& (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13864 			    < GET_MODE_SIZE (GET_MODE (x)))))
13865 		   && regno < FIRST_PSEUDO_REGISTER
13866 		   && REG_NREGS (x) > 1)
13867 	    {
13868 	      unsigned int ourend = END_REGNO (x);
13869 	      unsigned int i, offset;
13870 	      rtx oldnotes = 0;
13871 
13872 	      if (note)
13873 		offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
13874 	      else
13875 		offset = 1;
13876 
13877 	      for (i = regno + offset; i < ourend; i++)
13878 		move_deaths (regno_reg_rtx[i],
13879 			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
13880 	    }
13881 
13882 	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13883 	    {
13884 	      XEXP (note, 1) = *pnotes;
13885 	      *pnotes = note;
13886 	    }
13887 	  else
13888 	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13889 	}
13890 
13891       return;
13892     }
13893 
13894   else if (GET_CODE (x) == SET)
13895     {
13896       rtx dest = SET_DEST (x);
13897 
13898       move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13899 
13900       /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13901 	 that accesses one word of a multi-word item, some piece of
13902 	 every register in the expression is used by
13903 	 this insn, so remove any old death.  */
13904       /* ??? So why do we test for equality of the sizes?  */
13905 
13906       if (GET_CODE (dest) == ZERO_EXTRACT
13907 	  || GET_CODE (dest) == STRICT_LOW_PART
13908 	  || (GET_CODE (dest) == SUBREG
13909 	      && (((GET_MODE_SIZE (GET_MODE (dest))
13910 		    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13911 		  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13912 		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13913 	{
13914 	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13915 	  return;
13916 	}
13917 
13918       /* If this is some other SUBREG, we know it replaces the entire
13919 	 value, so use that as the destination.  */
13920       if (GET_CODE (dest) == SUBREG)
13921 	dest = SUBREG_REG (dest);
13922 
13923       /* If this is a MEM, adjust deaths of anything used in the address.
13924 	 For a REG (the only other possibility), the entire value is
13925 	 being replaced so the old value is not used in this insn.  */
13926 
13927       if (MEM_P (dest))
13928 	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13929 		     to_insn, pnotes);
13930       return;
13931     }
13932 
13933   else if (GET_CODE (x) == CLOBBER)
13934     return;
13935 
13936   len = GET_RTX_LENGTH (code);
13937   fmt = GET_RTX_FORMAT (code);
13938 
13939   for (i = 0; i < len; i++)
13940     {
13941       if (fmt[i] == 'E')
13942 	{
13943 	  int j;
13944 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13945 	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13946 			 to_insn, pnotes);
13947 	}
13948       else if (fmt[i] == 'e')
13949 	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13950     }
13951 }
13952 
13953 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13954    pattern of an insn.  X must be a REG.  */
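
/* For instance, a body such as
   (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0)) ...)
   makes reg 100 a bit-field target; the register number and field are
   chosen only for illustration.  */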
13955 
13956 static int
13957 reg_bitfield_target_p (rtx x, rtx body)
13958 {
13959   int i;
13960 
13961   if (GET_CODE (body) == SET)
13962     {
13963       rtx dest = SET_DEST (body);
13964       rtx target;
13965       unsigned int regno, tregno, endregno, endtregno;
13966 
13967       if (GET_CODE (dest) == ZERO_EXTRACT)
13968 	target = XEXP (dest, 0);
13969       else if (GET_CODE (dest) == STRICT_LOW_PART)
13970 	target = SUBREG_REG (XEXP (dest, 0));
13971       else
13972 	return 0;
13973 
13974       if (GET_CODE (target) == SUBREG)
13975 	target = SUBREG_REG (target);
13976 
13977       if (!REG_P (target))
13978 	return 0;
13979 
13980       tregno = REGNO (target), regno = REGNO (x);
13981       if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13982 	return target == x;
13983 
13984       endtregno = end_hard_regno (GET_MODE (target), tregno);
13985       endregno = end_hard_regno (GET_MODE (x), regno);
13986 
13987       return endregno > tregno && regno < endtregno;
13988     }
13989 
13990   else if (GET_CODE (body) == PARALLEL)
13991     for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13992       if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13993 	return 1;
13994 
13995   return 0;
13996 }
13997 
13998 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13999    as appropriate.  I3 and I2 are the insns resulting from the combination
14000    insns including FROM_INSN (I2 may be zero).
14001 
14002    ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14003    not need REG_DEAD notes because they are being substituted for.  This
14004    saves searching in the most common cases.
14005 
14006    Each note in the list is either ignored or placed on some insns, depending
14007    on the type of note.  */
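
/* For example, a REG_DEAD note whose register is still used as an input
   by I3 is simply moved to I3, whereas a note for a register that the
   combination eliminated entirely triggers a backward search for the
   previous use of that register; see the REG_DEAD case below.  */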
14008 
14009 static void
14010 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14011 		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
14012 {
14013   rtx note, next_note;
14014   rtx tem_note;
14015   rtx_insn *tem_insn;
14016 
14017   for (note = notes; note; note = next_note)
14018     {
14019       rtx_insn *place = 0, *place2 = 0;
14020 
14021       next_note = XEXP (note, 1);
14022       switch (REG_NOTE_KIND (note))
14023 	{
14024 	case REG_BR_PROB:
14025 	case REG_BR_PRED:
14026 	  /* Doesn't matter much where we put this, as long as it's somewhere.
14027 	     It is preferable to keep these notes on branches, which is most
14028 	     likely to be i3.  */
14029 	  place = i3;
14030 	  break;
14031 
14032 	case REG_NON_LOCAL_GOTO:
14033 	  if (JUMP_P (i3))
14034 	    place = i3;
14035 	  else
14036 	    {
14037 	      gcc_assert (i2 && JUMP_P (i2));
14038 	      place = i2;
14039 	    }
14040 	  break;
14041 
14042 	case REG_EH_REGION:
14043 	  /* These notes must remain with the call or trapping instruction.  */
14044 	  if (CALL_P (i3))
14045 	    place = i3;
14046 	  else if (i2 && CALL_P (i2))
14047 	    place = i2;
14048 	  else
14049 	    {
14050 	      gcc_assert (cfun->can_throw_non_call_exceptions);
14051 	      if (may_trap_p (i3))
14052 		place = i3;
14053 	      else if (i2 && may_trap_p (i2))
14054 		place = i2;
14055 	      /* ??? Otherwise assume we've combined things such that we
14056 		 can now prove that the instructions can't trap.  Drop the
14057 		 note in this case.  */
14058 	    }
14059 	  break;
14060 
14061 	case REG_ARGS_SIZE:
14062 	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
14063 	     entire adjustment.  Assert i3 contains at least some adjust.  */
14064 	  if (!noop_move_p (i3))
14065 	    {
14066 	      int old_size, args_size = INTVAL (XEXP (note, 0));
14067 	      /* fixup_args_size_notes looks at REG_NORETURN note,
14068 		 so ensure the note is placed there first.  */
14069 	      if (CALL_P (i3))
14070 		{
14071 		  rtx *np;
14072 		  for (np = &next_note; *np; np = &XEXP (*np, 1))
14073 		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
14074 		      {
14075 			rtx n = *np;
14076 			*np = XEXP (n, 1);
14077 			XEXP (n, 1) = REG_NOTES (i3);
14078 			REG_NOTES (i3) = n;
14079 			break;
14080 		      }
14081 		}
14082 	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14083 	      /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
14084 		 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
14085 	      gcc_assert (old_size != args_size
14086 			  || (CALL_P (i3)
14087 			      && !ACCUMULATE_OUTGOING_ARGS
14088 			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14089 	    }
14090 	  break;
14091 
14092 	case REG_NORETURN:
14093 	case REG_SETJMP:
14094 	case REG_TM:
14095 	case REG_CALL_DECL:
14096 	  /* These notes must remain with the call.  It should not be
14097 	     possible for both I2 and I3 to be a call.  */
14098 	  if (CALL_P (i3))
14099 	    place = i3;
14100 	  else
14101 	    {
14102 	      gcc_assert (i2 && CALL_P (i2));
14103 	      place = i2;
14104 	    }
14105 	  break;
14106 
14107 	case REG_UNUSED:
14108 	  /* Any clobbers for i3 may still exist, and so we must process
14109 	     REG_UNUSED notes from that insn.
14110 
14111 	     Any clobbers from i2 or i1 can only exist if they were added by
14112 	     recog_for_combine.  In that case, recog_for_combine created the
14113 	     necessary REG_UNUSED notes.  Trying to keep any original
14114 	     REG_UNUSED notes from these insns can cause incorrect output
14115 	     if it is for the same register as the original i3 dest.
14116 	     In that case, we will notice that the register is set in i3,
14117 	     and then add a REG_UNUSED note for the destination of i3, which
14118 	     is wrong.  However, it is possible to have REG_UNUSED notes from
14119 	     i2 or i1 for registers which were both used and clobbered, so
14120 	     we keep notes from i2 or i1 if they will turn into REG_DEAD
14121 	     notes.  */
14122 
14123 	  /* If this register is set or clobbered in I3, put the note there
14124 	     unless there is one already.  */
14125 	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14126 	    {
14127 	      if (from_insn != i3)
14128 		break;
14129 
14130 	      if (! (REG_P (XEXP (note, 0))
14131 		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14132 		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14133 		place = i3;
14134 	    }
14135 	  /* Otherwise, if this register is used by I3, then this register
14136 	     now dies here, so we must put a REG_DEAD note here unless there
14137 	     is one already.  */
14138 	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14139 		   && ! (REG_P (XEXP (note, 0))
14140 			 ? find_regno_note (i3, REG_DEAD,
14141 					    REGNO (XEXP (note, 0)))
14142 			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14143 	    {
14144 	      PUT_REG_NOTE_KIND (note, REG_DEAD);
14145 	      place = i3;
14146 	    }
14147 	  break;
14148 
14149 	case REG_EQUAL:
14150 	case REG_EQUIV:
14151 	case REG_NOALIAS:
14152 	  /* These notes say something about results of an insn.  We can
14153 	     only support them if they used to be on I3 in which case they
14154 	     remain on I3.  Otherwise they are ignored.
14155 
14156 	     If the note refers to an expression that is not a constant, we
14157 	     must also ignore the note since we cannot tell whether the
14158 	     equivalence is still true.  It might be possible to do
14159 	     slightly better than this (we only have a problem if I2DEST
14160 	     or I1DEST is present in the expression), but it doesn't
14161 	     seem worth the trouble.  */
14162 
14163 	  if (from_insn == i3
14164 	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14165 	    place = i3;
14166 	  break;
14167 
14168 	case REG_INC:
14169 	  /* These notes say something about how a register is used.  They must
14170 	     be present on any use of the register in I2 or I3.  */
14171 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14172 	    place = i3;
14173 
14174 	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14175 	    {
14176 	      if (place)
14177 		place2 = i2;
14178 	      else
14179 		place = i2;
14180 	    }
14181 	  break;
14182 
14183 	case REG_LABEL_TARGET:
14184 	case REG_LABEL_OPERAND:
14185 	  /* This can show up in several ways -- either directly in the
14186 	     pattern, or hidden off in the constant pool with (or without?)
14187 	     a REG_EQUAL note.  */
14188 	  /* ??? Ignore the without-reg_equal-note problem for now.  */
14189 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14190 	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14191 		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14192 		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14193 	    place = i3;
14194 
14195 	  if (i2
14196 	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14197 		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14198 		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14199 		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14200 	    {
14201 	      if (place)
14202 		place2 = i2;
14203 	      else
14204 		place = i2;
14205 	    }
14206 
14207 	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to record the note's
14208 	     label as the JUMP_LABEL, or to decrement LABEL_NUSES if it is
14209 	     already there.  */
14210 	  if (place && JUMP_P (place)
14211 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14212 	      && (JUMP_LABEL (place) == NULL
14213 		  || JUMP_LABEL (place) == XEXP (note, 0)))
14214 	    {
14215 	      rtx label = JUMP_LABEL (place);
14216 
14217 	      if (!label)
14218 		JUMP_LABEL (place) = XEXP (note, 0);
14219 	      else if (LABEL_P (label))
14220 		LABEL_NUSES (label)--;
14221 	    }
14222 
14223 	  if (place2 && JUMP_P (place2)
14224 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14225 	      && (JUMP_LABEL (place2) == NULL
14226 		  || JUMP_LABEL (place2) == XEXP (note, 0)))
14227 	    {
14228 	      rtx label = JUMP_LABEL (place2);
14229 
14230 	      if (!label)
14231 		JUMP_LABEL (place2) = XEXP (note, 0);
14232 	      else if (LABEL_P (label))
14233 		LABEL_NUSES (label)--;
14234 	      place2 = 0;
14235 	    }
14236 	  break;
14237 
14238 	case REG_NONNEG:
14239 	  /* This note says something about the value of a register prior
14240 	     to the execution of an insn.  It is too much trouble to see
14241 	     if the note is still correct in all situations.  It is better
14242 	     to simply delete it.  */
14243 	  break;
14244 
14245 	case REG_DEAD:
14246 	  /* If we replaced the right hand side of FROM_INSN with a
14247 	     REG_EQUAL note, the original use of the dying register
14248 	     will not have been combined into I3 and I2.  In such cases,
14249 	     FROM_INSN is guaranteed to be the first of the combined
14250 	     instructions, so we simply need to search back before
14251 	     FROM_INSN for the previous use or set of this register,
14252 	     then alter the notes there appropriately.
14253 
14254 	     If the register is used as an input in I3, it dies there.
14255 	     Similarly for I2, if it is nonzero and adjacent to I3.
14256 
14257 	     If the register is not used as an input in either I3 or I2
14258 	     and it is not one of the registers we were supposed to eliminate,
14259 	     there are two possibilities.  We might have a non-adjacent I2
14260 	     or we might have somehow eliminated an additional register
14261 	     from a computation.  For example, we might have had A & B where
14262 	     we discover that B will always be zero.  In this case we will
14263 	     eliminate the reference to A.
14264 
14265 	     In both cases, we must search to see if we can find a previous
14266 	     use of A and put the death note there.  */
14267 
14268 	  if (from_insn
14269 	      && from_insn == i2mod
14270 	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14271 	    tem_insn = from_insn;
14272 	  else
14273 	    {
14274 	      if (from_insn
14275 		  && CALL_P (from_insn)
14276 		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14277 		place = from_insn;
14278 	      else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14279 		{
14280 		  /* If the new I2 sets the same register that is marked
14281 		     dead in the note, we do not in general know where to
14282 		     put the note.  One important case we _can_ handle is
14283 		     when the note comes from I3.  */
14284 		  if (from_insn == i3)
14285 		    place = i3;
14286 		  else
14287 		    break;
14288 		}
14289 	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14290 		place = i3;
14291 	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14292 		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14293 		place = i2;
14294 	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14295 			&& !(i2mod
14296 			     && reg_overlap_mentioned_p (XEXP (note, 0),
14297 							 i2mod_old_rhs)))
14298 		       || rtx_equal_p (XEXP (note, 0), elim_i1)
14299 		       || rtx_equal_p (XEXP (note, 0), elim_i0))
14300 		break;
14301 	      tem_insn = i3;
14302 	    }
14303 
14304 	  if (place == 0)
14305 	    {
14306 	      basic_block bb = this_basic_block;
14307 
14308 	      for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14309 		{
14310 		  if (!NONDEBUG_INSN_P (tem_insn))
14311 		    {
14312 		      if (tem_insn == BB_HEAD (bb))
14313 			break;
14314 		      continue;
14315 		    }
14316 
14317 		  /* If the register is being set at TEM_INSN, see if that is all
14318 		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
14319 		     into a REG_UNUSED note instead. Don't delete sets to
14320 		     global register vars.  */
14321 		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14322 		       || !global_regs[REGNO (XEXP (note, 0))])
14323 		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14324 		    {
14325 		      rtx set = single_set (tem_insn);
14326 		      rtx inner_dest = 0;
14327 		      rtx_insn *cc0_setter = NULL;
14328 
14329 		      if (set != 0)
14330 			for (inner_dest = SET_DEST (set);
14331 			     (GET_CODE (inner_dest) == STRICT_LOW_PART
14332 			      || GET_CODE (inner_dest) == SUBREG
14333 			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
14334 			     inner_dest = XEXP (inner_dest, 0))
14335 			  ;
14336 
14337 		      /* Verify that it was the set, and not a clobber that
14338 			 modified the register.
14339 
14340 			 CC0 targets must be careful to maintain setter/user
14341 			 pairs.  If we cannot delete the setter due to side
14342 			 effects, mark the user with an UNUSED note instead
14343 			 of deleting it.  */
14344 
14345 		      if (set != 0 && ! side_effects_p (SET_SRC (set))
14346 			  && rtx_equal_p (XEXP (note, 0), inner_dest)
14347 			  && (!HAVE_cc0
14348 			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14349 				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14350 				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14351 			{
14352 			  /* Move the notes and links of TEM_INSN elsewhere.
14353 			     This might delete other dead insns recursively.
14354 			     First set the pattern to something that won't use
14355 			     any register.  */
14356 			  rtx old_notes = REG_NOTES (tem_insn);
14357 
14358 			  PATTERN (tem_insn) = pc_rtx;
14359 			  REG_NOTES (tem_insn) = NULL;
14360 
14361 			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14362 					    NULL_RTX, NULL_RTX, NULL_RTX);
14363 			  distribute_links (LOG_LINKS (tem_insn));
14364 
14365 			  unsigned int regno = REGNO (XEXP (note, 0));
14366 			  reg_stat_type *rsp = &reg_stat[regno];
14367 			  if (rsp->last_set == tem_insn)
14368 			    record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14369 
14370 			  SET_INSN_DELETED (tem_insn);
14371 			  if (tem_insn == i2)
14372 			    i2 = NULL;
14373 
14374 			  /* Delete the setter too.  */
14375 			  if (cc0_setter)
14376 			    {
14377 			      PATTERN (cc0_setter) = pc_rtx;
14378 			      old_notes = REG_NOTES (cc0_setter);
14379 			      REG_NOTES (cc0_setter) = NULL;
14380 
14381 			      distribute_notes (old_notes, cc0_setter,
14382 						cc0_setter, NULL,
14383 						NULL_RTX, NULL_RTX, NULL_RTX);
14384 			      distribute_links (LOG_LINKS (cc0_setter));
14385 
14386 			      SET_INSN_DELETED (cc0_setter);
14387 			      if (cc0_setter == i2)
14388 				i2 = NULL;
14389 			    }
14390 			}
14391 		      else
14392 			{
14393 			  PUT_REG_NOTE_KIND (note, REG_UNUSED);
14394 
14395 			  /*  If there isn't already a REG_UNUSED note, put one
14396 			      here.  Do not place a REG_DEAD note, even if
14397 			      the register is also used here; that would not
14398 			      match the algorithm used in lifetime analysis
14399 			      and can cause the consistency check in the
14400 			      scheduler to fail.  */
14401 			  if (! find_regno_note (tem_insn, REG_UNUSED,
14402 						 REGNO (XEXP (note, 0))))
14403 			    place = tem_insn;
14404 			  break;
14405 			}
14406 		    }
14407 		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14408 			   || (CALL_P (tem_insn)
14409 			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14410 		    {
14411 		      place = tem_insn;
14412 
14413 		      /* If we are doing a 3->2 combination, and we have a
14414 			 register which formerly died in i3 and was not used
14415 			 by i2, which now no longer dies in i3 and is used in
14416 			 i2 but does not die in i2, and place is between i2
14417 			 and i3, then we may need to move a link from place to
14418 			 i2.  */
14419 		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14420 			  && from_insn
14421 			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14422 			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14423 			{
14424 			  struct insn_link *links = LOG_LINKS (place);
14425 			  LOG_LINKS (place) = NULL;
14426 			  distribute_links (links);
14427 			}
14428 		      break;
14429 		    }
14430 
14431 		  if (tem_insn == BB_HEAD (bb))
14432 		    break;
14433 		}
14434 
14435 	    }
14436 
14437 	  /* If the register is set or already dead at PLACE, we needn't do
14438 	     anything with this note if it is still a REG_DEAD note.
14439 	     We check here if it is set at all, not if it is totally replaced,
14440 	     which is what `dead_or_set_p' checks, so also check for it being
14441 	     set partially.  */
14442 
14443 	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
14444 	    {
14445 	      unsigned int regno = REGNO (XEXP (note, 0));
14446 	      reg_stat_type *rsp = &reg_stat[regno];
14447 
14448 	      if (dead_or_set_p (place, XEXP (note, 0))
14449 		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14450 		{
14451 		  /* Unless the register previously died in PLACE, clear
14452 		     last_death.  [I no longer understand why this is
14453 		     being done.] */
14454 		  if (rsp->last_death != place)
14455 		    rsp->last_death = 0;
14456 		  place = 0;
14457 		}
14458 	      else
14459 		rsp->last_death = place;
14460 
14461 	      /* If this is a death note for a hard reg that is occupying
14462 		 multiple registers, ensure that we are still using all
14463 		 parts of the object.  If we find a piece of the object
14464 		 that is unused, we must arrange for an appropriate REG_DEAD
14465 		 note to be added for it.  However, we can't just emit a USE
14466 		 and tag the note to it, since the register might actually
14467 		 be dead; so we recurse, and the recursive call then finds
14468 		 the previous insn that used this register.  */
14469 
14470 	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
14471 		{
14472 		  unsigned int endregno = END_REGNO (XEXP (note, 0));
14473 		  bool all_used = true;
14474 		  unsigned int i;
14475 
14476 		  for (i = regno; i < endregno; i++)
14477 		    if ((! refers_to_regno_p (i, PATTERN (place))
14478 			 && ! find_regno_fusage (place, USE, i))
14479 			|| dead_or_set_regno_p (place, i))
14480 		      {
14481 			all_used = false;
14482 			break;
14483 		      }
14484 
14485 		  if (! all_used)
14486 		    {
14487 		      /* Put only REG_DEAD notes for pieces that are
14488 			 not already dead or set.  */
14489 
14490 		      for (i = regno; i < endregno;
14491 			   i += hard_regno_nregs[i][reg_raw_mode[i]])
14492 			{
14493 			  rtx piece = regno_reg_rtx[i];
14494 			  basic_block bb = this_basic_block;
14495 
14496 			  if (! dead_or_set_p (place, piece)
14497 			      && ! reg_bitfield_target_p (piece,
14498 							  PATTERN (place)))
14499 			    {
14500 			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
14501 							     NULL_RTX);
14502 
14503 			      distribute_notes (new_note, place, place,
14504 						NULL, NULL_RTX, NULL_RTX,
14505 						NULL_RTX);
14506 			    }
14507 			  else if (! refers_to_regno_p (i, PATTERN (place))
14508 				   && ! find_regno_fusage (place, USE, i))
14509 			    for (tem_insn = PREV_INSN (place); ;
14510 				 tem_insn = PREV_INSN (tem_insn))
14511 			      {
14512 				if (!NONDEBUG_INSN_P (tem_insn))
14513 				  {
14514 				    if (tem_insn == BB_HEAD (bb))
14515 				      break;
14516 				    continue;
14517 				  }
14518 				if (dead_or_set_p (tem_insn, piece)
14519 				    || reg_bitfield_target_p (piece,
14520 							      PATTERN (tem_insn)))
14521 				  {
14522 				    add_reg_note (tem_insn, REG_UNUSED, piece);
14523 				    break;
14524 				  }
14525 			      }
14526 			}
14527 
14528 		      place = 0;
14529 		    }
14530 		}
14531 	    }
14532 	  break;
14533 
14534 	default:
14535 	  /* Any other notes should not be present at this point in the
14536 	     compilation.  */
14537 	  gcc_unreachable ();
14538 	}
14539 
14540       if (place)
14541 	{
14542 	  XEXP (note, 1) = REG_NOTES (place);
14543 	  REG_NOTES (place) = note;
14544 	}
14545 
14546       if (place2)
14547 	add_shallow_copy_of_reg_note (place2, note);
14548     }
14549 }
14550 
14551 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14552    I3, I2, and I1 to new locations.  This is also called to add a link
14553    pointing at I3 when I3's destination is changed.  */
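
/* For instance, a link that used to sit on I3 and point at the insn
   setting some register is re-homed onto the first insn after that setter
   which still uses the register, if any such insn remains in the basic
   block.  */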
14554 
14555 static void
14556 distribute_links (struct insn_link *links)
14557 {
14558   struct insn_link *link, *next_link;
14559 
14560   for (link = links; link; link = next_link)
14561     {
14562       rtx_insn *place = 0;
14563       rtx_insn *insn;
14564       rtx set, reg;
14565 
14566       next_link = link->next;
14567 
14568       /* If the insn that this link points to is a NOTE, ignore it.  */
14569       if (NOTE_P (link->insn))
14570 	continue;
14571 
14572       set = 0;
14573       rtx pat = PATTERN (link->insn);
14574       if (GET_CODE (pat) == SET)
14575 	set = pat;
14576       else if (GET_CODE (pat) == PARALLEL)
14577 	{
14578 	  int i;
14579 	  for (i = 0; i < XVECLEN (pat, 0); i++)
14580 	    {
14581 	      set = XVECEXP (pat, 0, i);
14582 	      if (GET_CODE (set) != SET)
14583 		continue;
14584 
14585 	      reg = SET_DEST (set);
14586 	      while (GET_CODE (reg) == ZERO_EXTRACT
14587 		     || GET_CODE (reg) == STRICT_LOW_PART
14588 		     || GET_CODE (reg) == SUBREG)
14589 		reg = XEXP (reg, 0);
14590 
14591 	      if (!REG_P (reg))
14592 		continue;
14593 
14594 	      if (REGNO (reg) == link->regno)
14595 		break;
14596 	    }
14597 	  if (i == XVECLEN (pat, 0))
14598 	    continue;
14599 	}
14600       else
14601 	continue;
14602 
14603       reg = SET_DEST (set);
14604 
14605       while (GET_CODE (reg) == ZERO_EXTRACT
14606 	     || GET_CODE (reg) == STRICT_LOW_PART
14607 	     || GET_CODE (reg) == SUBREG)
14608 	reg = XEXP (reg, 0);
14609 
14610       /* A LOG_LINK is defined as being placed on the first insn that uses
14611 	 a register and points to the insn that sets the register.  Start
14612 	 searching at the next insn after the target of the link and stop
14613 	 when we reach a set of the register or the end of the basic block.
14614 
14615 	 Note that this correctly handles the link that used to point from
14616 	 I3 to I2.  Also note that not much searching is typically done here
14617 	 since most links don't point very far away.  */
14618 
14619       for (insn = NEXT_INSN (link->insn);
14620 	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14621 		     || BB_HEAD (this_basic_block->next_bb) != insn));
14622 	   insn = NEXT_INSN (insn))
14623 	if (DEBUG_INSN_P (insn))
14624 	  continue;
14625 	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14626 	  {
14627 	    if (reg_referenced_p (reg, PATTERN (insn)))
14628 	      place = insn;
14629 	    break;
14630 	  }
14631 	else if (CALL_P (insn)
14632 		 && find_reg_fusage (insn, USE, reg))
14633 	  {
14634 	    place = insn;
14635 	    break;
14636 	  }
14637 	else if (INSN_P (insn) && reg_set_p (reg, insn))
14638 	  break;
14639 
14640       /* If we found a place to put the link, place it there unless there
14641 	 is already a link to the same insn as LINK at that point.  */
14642 
14643       if (place)
14644 	{
14645 	  struct insn_link *link2;
14646 
14647 	  FOR_EACH_LOG_LINK (link2, place)
14648 	    if (link2->insn == link->insn && link2->regno == link->regno)
14649 	      break;
14650 
14651 	  if (link2 == NULL)
14652 	    {
14653 	      link->next = LOG_LINKS (place);
14654 	      LOG_LINKS (place) = link;
14655 
14656 	      /* Set added_links_insn to the earliest insn we added a
14657 		 link to.  */
14658 	      if (added_links_insn == 0
14659 		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14660 		added_links_insn = place;
14661 	    }
14662 	}
14663     }
14664 }
14665 
14666 /* Check for any register or memory mentioned in EQUIV that is not
14667    mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
14668    of EXPR where some registers may have been replaced by constants.  */
14669 
14670 static bool
14671 unmentioned_reg_p (rtx equiv, rtx expr)
14672 {
14673   subrtx_iterator::array_type array;
14674   FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14675     {
14676       const_rtx x = *iter;
14677       if ((REG_P (x) || MEM_P (x))
14678 	  && !reg_mentioned_p (x, expr))
14679 	return true;
14680     }
14681   return false;
14682 }
14683 
14684 DEBUG_FUNCTION void
14685 dump_combine_stats (FILE *file)
14686 {
14687   fprintf
14688     (file,
14689      ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14690      combine_attempts, combine_merges, combine_extras, combine_successes);
14691 }
14692 
14693 void
14694 dump_combine_total_stats (FILE *file)
14695 {
14696   fprintf
14697     (file,
14698      "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14699      total_attempts, total_merges, total_extras, total_successes);
14700 }
14701 
14702 /* Try combining insns through substitution.  */
14703 static unsigned int
14704 rest_of_handle_combine (void)
14705 {
14706   int rebuild_jump_labels_after_combine;
14707 
14708   df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14709   df_note_add_problem ();
14710   df_analyze ();
14711 
14712   regstat_init_n_sets_and_refs ();
14713   reg_n_sets_max = max_reg_num ();
14714 
14715   rebuild_jump_labels_after_combine
14716     = combine_instructions (get_insns (), max_reg_num ());
14717 
14718   /* Combining insns may have turned an indirect jump into a
14719      direct jump.  Rebuild the JUMP_LABEL fields of jumping
14720      instructions.  */
14721   if (rebuild_jump_labels_after_combine)
14722     {
14723       if (dom_info_available_p (CDI_DOMINATORS))
14724 	free_dominance_info (CDI_DOMINATORS);
14725       timevar_push (TV_JUMP);
14726       rebuild_jump_labels (get_insns ());
14727       cleanup_cfg (0);
14728       timevar_pop (TV_JUMP);
14729     }
14730 
14731   regstat_free_n_sets_and_refs ();
14732   return 0;
14733 }
14734 
14735 namespace {
14736 
14737 const pass_data pass_data_combine =
14738 {
14739   RTL_PASS, /* type */
14740   "combine", /* name */
14741   OPTGROUP_NONE, /* optinfo_flags */
14742   TV_COMBINE, /* tv_id */
14743   PROP_cfglayout, /* properties_required */
14744   0, /* properties_provided */
14745   0, /* properties_destroyed */
14746   0, /* todo_flags_start */
14747   TODO_df_finish, /* todo_flags_finish */
14748 };
14749 
14750 class pass_combine : public rtl_opt_pass
14751 {
14752 public:
14753   pass_combine (gcc::context *ctxt)
14754     : rtl_opt_pass (pass_data_combine, ctxt)
14755   {}
14756 
14757   /* opt_pass methods: */
14758   virtual bool gate (function *) { return (optimize > 0); }
14759   virtual unsigned int execute (function *)
14760     {
14761       return rest_of_handle_combine ();
14762     }
14763 
14764 }; // class pass_combine
14765 
14766 } // anon namespace
14767 
14768 rtl_opt_pass *
14769 make_pass_combine (gcc::context *ctxt)
14770 {
14771   return new pass_combine (ctxt);
14772 }
14773