xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/combine.c (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
1 /* Optimize by combining instructions for GNU compiler.
2    Copyright (C) 1987-2016 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21    Portable Optimizer, but redone to work on our list-structured
22    representation for RTL instead of their string representation.
23 
24    The LOG_LINKS of each insn identify the most recent assignment
25    to each REG used in the insn.  It is a list of previous insns,
26    each of which contains a SET for a REG that is used in this insn
27    and not used or set in between.  LOG_LINKs never cross basic blocks.
28    They were set up by the preceding pass (lifetime analysis).
29 
30    We try to combine each pair of insns joined by a logical link.
31    We also try to combine triplets of insns A, B and C when C has
32    a link back to B and B has a link back to A.  Likewise for a
33    small number of quadruplets of insns A, B, C and D for which
34    there's high likelihood of success.
35 
36    LOG_LINKS do not have links for uses of CC0.  They don't
37    need to, because the insn that sets the CC0 is always immediately
38    before the insn that tests it.  So we always regard a branch
39    insn as having a logical link to the preceding insn.  The same is true
40    for an insn explicitly using CC0.
41 
42    We check (with use_crosses_set_p) to avoid combining in such a way
43    as to move a computation to a place where its value would be different.
44 
45    Combination is done by mathematically substituting the previous
46    insn(s) values for the regs they set into the expressions in
47    the later insns that refer to these regs.  If the result is a valid insn
48    for our target machine, according to the machine description,
49    we install it, delete the earlier insns, and update the data flow
50    information (LOG_LINKS and REG_NOTES) for what we did.
51 
52    There are a few exceptions where the dataflow information isn't
53    completely updated (however this is only a local issue since it is
54    regenerated before the next pass that uses it):
55 
56    - reg_live_length is not updated
57    - reg_n_refs is not adjusted in the rare case when a register is
58      no longer required in a computation
59    - there are extremely rare cases (see distribute_notes) when a
60      REG_DEAD note is lost
61    - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62      removed because there is no way to know which register it was
63      linking
64 
65    To simplify substitution, we combine only when the earlier insn(s)
66    consist of only a single assignment.  To simplify updating afterward,
67    we never combine when a subroutine call appears in the middle.
68 
69    Since we do not represent assignments to CC0 explicitly except when that
70    is all an insn does, there is no LOG_LINKS entry in an insn that uses
71    the condition code for the insn that set the condition code.
72    Fortunately, these two insns must be consecutive.
73    Therefore, every JUMP_INSN is taken to have an implicit logical link
74    to the preceding insn.  This is not quite right, since non-jumps can
75    also use the condition code; but in practice such insns would not
76    combine anyway.  */
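/* For illustration only (a sketch, with made-up register numbers): given the
   two linked insns

     (set (reg:SI 100) (const_int 5))
     (set (reg:SI 101) (plus:SI (reg:SI 100) (reg:SI 102)))

   substituting the first into the second yields the single insn

     (set (reg:SI 101) (plus:SI (reg:SI 102) (const_int 5)))

   which is installed only if it matches a pattern in the machine description
   and is judged no more expensive than the original pair.  */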
77 
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "predict.h"
86 #include "df.h"
87 #include "tm_p.h"
88 #include "optabs.h"
89 #include "regs.h"
90 #include "emit-rtl.h"
91 #include "recog.h"
92 #include "cgraph.h"
93 #include "stor-layout.h"
94 #include "cfgrtl.h"
95 #include "cfgcleanup.h"
96 /* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
97 #include "explow.h"
98 #include "insn-attr.h"
99 #include "rtlhooks-def.h"
100 #include "params.h"
101 #include "tree-pass.h"
102 #include "valtrack.h"
103 #include "rtl-iter.h"
104 #include "print-rtl.h"
105 
106 #ifndef LOAD_EXTEND_OP
107 #define LOAD_EXTEND_OP(M) UNKNOWN
108 #endif
109 
110 /* Number of attempts to combine instructions in this function.  */
111 
112 static int combine_attempts;
113 
114 /* Number of attempts that got as far as substitution in this function.  */
115 
116 static int combine_merges;
117 
118 /* Number of instructions combined with added SETs in this function.  */
119 
120 static int combine_extras;
121 
122 /* Number of instructions combined in this function.  */
123 
124 static int combine_successes;
125 
126 /* Totals over entire compilation.  */
127 
128 static int total_attempts, total_merges, total_extras, total_successes;
129 
130 /* combine_instructions may try to replace the right hand side of the
131    second instruction with the value of an associated REG_EQUAL note
132    before throwing it at try_combine.  That is problematic when there
133    is a REG_DEAD note for a register used in the old right hand side
134    and can cause distribute_notes to do wrong things.  This is the
135    second instruction if it has been so modified, null otherwise.  */
136 
137 static rtx_insn *i2mod;
138 
139 /* When I2MOD is nonnull, this is a copy of the old right hand side.  */
140 
141 static rtx i2mod_old_rhs;
142 
143 /* When I2MOD is nonnull, this is a copy of the new right hand side.  */
144 
145 static rtx i2mod_new_rhs;
146 
147 struct reg_stat_type {
148   /* Record last point of death of (hard or pseudo) register n.  */
149   rtx_insn			*last_death;
150 
151   /* Record last point of modification of (hard or pseudo) register n.  */
152   rtx_insn			*last_set;
153 
154   /* The next group of fields allows the recording of the last value assigned
155      to (hard or pseudo) register n.  We use this information to see if an
156      operation being processed is redundant given a prior operation performed
157      on the register.  For example, an `and' with a constant is redundant if
158      all the zero bits are already known to be turned off.
159 
160      We use an approach similar to that used by cse, but change it in the
161      following ways:
162 
163      (1) We do not want to reinitialize at each label.
164      (2) It is useful, but not critical, to know the actual value assigned
165 	 to a register.  Often just its form is helpful.
166 
167      Therefore, we maintain the following fields:
168 
169      last_set_value		the last value assigned
170      last_set_label		records the value of label_tick when the
171 				register was assigned
172      last_set_table_tick	records the value of label_tick when a
173 				value using the register is assigned
174      last_set_invalid		set to nonzero when it is not valid
175 				to use the value of this register in some
176 				register's value
177 
178      To understand the usage of these tables, it is important to understand
179      the distinction between the value in last_set_value being valid and
180      the register being validly contained in some other expression in the
181      table.
182 
183      (The next two parameters are out of date).
184 
185      reg_stat[i].last_set_value is valid if it is nonzero, and either
186      reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
187 
188      Register I may validly appear in any expression returned for the value
189      of another register if reg_n_sets[i] is 1.  It may also appear in the
190      value for register J if reg_stat[j].last_set_invalid is zero, or
191      reg_stat[i].last_set_label < reg_stat[j].last_set_label.
192 
193      If an expression is found in the table containing a register which may
194      not validly appear in an expression, the register is replaced by
195      something that won't match, (clobber (const_int 0)).  */
196 
197   /* Record last value assigned to (hard or pseudo) register n.  */
198 
199   rtx				last_set_value;
200 
201   /* Record the value of label_tick when an expression involving register n
202      is placed in last_set_value.  */
203 
204   int				last_set_table_tick;
205 
206   /* Record the value of label_tick when the value for register n is placed in
207      last_set_value.  */
208 
209   int				last_set_label;
210 
211   /* These fields are maintained in parallel with last_set_value and are
212      used to store the mode in which the register was last set, the bits
213      that were known to be zero when it was last set, and the number of
214      sign bits copies it was known to have when it was last set.  */
215 
216   unsigned HOST_WIDE_INT	last_set_nonzero_bits;
217   char				last_set_sign_bit_copies;
218   ENUM_BITFIELD(machine_mode)	last_set_mode : 8;
219 
220   /* Set nonzero if references to register n in expressions should not be
221      used.  last_set_invalid is set nonzero when this register is being
222      assigned to and last_set_table_tick == label_tick.  */
223 
224   char				last_set_invalid;
225 
226   /* Some registers that are set more than once and used in more than one
227      basic block are nevertheless always set in similar ways.  For example,
228      a QImode register may be loaded from memory in two places on a machine
229      where byte loads zero extend.
230 
231      We record in the following fields if a register has some leading bits
232      that are always equal to the sign bit, and what we know about the
233      nonzero bits of a register, specifically which bits are known to be
234      zero.
235 
236      If an entry is zero, it means that we don't know anything special.  */
237 
238   unsigned char			sign_bit_copies;
239 
240   unsigned HOST_WIDE_INT	nonzero_bits;
241 
242   /* Record the value of the label_tick when the last truncation
243      happened.  The field truncated_to_mode is only valid if
244      truncation_label == label_tick.  */
245 
246   int				truncation_label;
247 
248   /* Record the last truncation seen for this register.  If truncation
249      is not a nop to this mode we might be able to save an explicit
250      truncation if we know that value already contains a truncated
251      value.  */
252 
253   ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
254 };
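/* Illustrative example (hypothetical register number): if pseudo 100 is only
   ever loaded from memory in QImode on a machine where byte loads zero
   extend, reg_stat[100].nonzero_bits records that only the low 8 bits can be
   nonzero, so a later `and' of that register with 255 is redundant.
   Likewise sign_bit_copies counts high-order bits known to equal the sign
   bit: a value produced by (sign_extend:SI (reg:QI ...)) has at least 25
   sign-bit copies in SImode, so sign-extending its low byte again is
   redundant.  */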
255 
256 
257 static vec<reg_stat_type> reg_stat;
258 
259 /* One plus the highest pseudo for which we track REG_N_SETS.
260    regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
261    but during combine_split_insns new pseudos can be created.  As we don't have
262    updated DF information in that case, it is hard to initialize the array
263    after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
264    so instead of growing the arrays, just assume all newly created pseudos
265    during combine might be set multiple times.  */
266 
267 static unsigned int reg_n_sets_max;
268 
269 /* Record the luid of the last insn that invalidated memory
270    (anything that writes memory, and subroutine calls, but not pushes).  */
271 
272 static int mem_last_set;
273 
274 /* Record the luid of the last CALL_INSN
275    so we can tell whether a potential combination crosses any calls.  */
276 
277 static int last_call_luid;
278 
279 /* When `subst' is called, this is the insn that is being modified
280    (by combining in a previous insn).  The PATTERN of this insn
281    is still the old pattern partially modified and it should not be
282    looked at, but this may be used to examine the successors of the insn
283    to judge whether a simplification is valid.  */
284 
285 static rtx_insn *subst_insn;
286 
287 /* This is the lowest LUID that `subst' is currently dealing with.
288    get_last_value will not return a value if the register was set at or
289    after this LUID.  If not for this mechanism, we could get confused if
290    I2 or I1 in try_combine were an insn that used the old value of a register
291    to obtain a new value.  In that case, we might erroneously get the
292    new value of the register when we wanted the old one.  */
293 
294 static int subst_low_luid;
295 
296 /* This contains any hard registers that are used in newpat; reg_dead_at_p
297    must consider all these registers to be always live.  */
298 
299 static HARD_REG_SET newpat_used_regs;
300 
301 /* This is an insn to which a LOG_LINKS entry has been added.  If this
302    insn is earlier than I2 or I3, combine should rescan starting at
303    that location.  */
304 
305 static rtx_insn *added_links_insn;
306 
307 /* Basic block in which we are performing combines.  */
308 static basic_block this_basic_block;
309 static bool optimize_this_for_speed_p;
310 
311 
312 /* Length of the currently allocated uid_insn_cost array.  */
313 
314 static int max_uid_known;
315 
316 /* The following array records the insn_rtx_cost for every insn
317    in the instruction stream.  */
318 
319 static int *uid_insn_cost;
320 
321 /* The following array records the LOG_LINKS for every insn in the
322    instruction stream as struct insn_link pointers.  */
323 
324 struct insn_link {
325   rtx_insn *insn;
326   unsigned int regno;
327   struct insn_link *next;
328 };
329 
330 static struct insn_link **uid_log_links;
331 
332 static inline int
333 insn_uid_check (const_rtx insn)
334 {
335   int uid = INSN_UID (insn);
336   gcc_checking_assert (uid <= max_uid_known);
337   return uid;
338 }
339 
340 #define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
341 #define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])
342 
343 #define FOR_EACH_LOG_LINK(L, INSN)				\
344   for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
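/* Typical use of the accessors above (a sketch, with hypothetical variables
   USE_INSN and DEST):

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, use_insn)
       if (link->regno == REGNO (dest))
	 break;

   Afterwards LINK either names the insn that most recently set DEST before
   USE_INSN in this basic block, or is NULL if no such link exists;
   find_single_use below performs essentially this walk.  */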
345 
346 /* Links for LOG_LINKS are allocated from this obstack.  */
347 
348 static struct obstack insn_link_obstack;
349 
350 /* Allocate a link.  */
351 
352 static inline struct insn_link *
353 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
354 {
355   struct insn_link *l
356     = (struct insn_link *) obstack_alloc (&insn_link_obstack,
357 					  sizeof (struct insn_link));
358   l->insn = insn;
359   l->regno = regno;
360   l->next = next;
361   return l;
362 }
363 
364 /* Incremented for each basic block.  */
365 
366 static int label_tick;
367 
368 /* Reset to label_tick for each extended basic block in scanning order.  */
369 
370 static int label_tick_ebb_start;
371 
372 /* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
373    largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */
374 
375 static machine_mode nonzero_bits_mode;
376 
377 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
378    be safely used.  It is zero while computing them and after combine has
379    completed.  The former test prevents propagating values based on
380    previously set values, which can be incorrect if a variable is modified
381    in a loop.  */
382 
383 static int nonzero_sign_valid;
384 
385 
386 /* Record one modification to rtl structure
387    to be undone by storing old_contents into *where.  */
388 
389 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
390 
391 struct undo
392 {
393   struct undo *next;
394   enum undo_kind kind;
395   union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
396   union { rtx *r; int *i; struct insn_link **l; } where;
397 };
398 
399 /* Record a bunch of changes to be undone.  undos chains the changes
400    currently recorded, while frees chains undo entries available for reuse.
401 
402    other_insn is nonzero if we have modified some other insn in the process
403    of working on subst_insn.  It must be verified too.  */
404 
405 struct undobuf
406 {
407   struct undo *undos;
408   struct undo *frees;
409   rtx_insn *other_insn;
410 };
411 
412 static struct undobuf undobuf;
413 
414 /* Number of times the pseudo being substituted for
415    was found and replaced.  */
416 
417 static int n_occurrences;
418 
419 static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
420 					 machine_mode,
421 					 unsigned HOST_WIDE_INT,
422 					 unsigned HOST_WIDE_INT *);
423 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode, const_rtx,
424 						machine_mode,
425 						unsigned int, unsigned int *);
426 static void do_SUBST (rtx *, rtx);
427 static void do_SUBST_INT (int *, int);
428 static void init_reg_last (void);
429 static void setup_incoming_promotions (rtx_insn *);
430 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
431 static int cant_combine_insn_p (rtx_insn *);
432 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
433 			  rtx_insn *, rtx_insn *, rtx *, rtx *);
434 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
435 static int contains_muldiv (rtx);
436 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
437 			      int *, rtx_insn *);
438 static void undo_all (void);
439 static void undo_commit (void);
440 static rtx *find_split_point (rtx *, rtx_insn *, bool);
441 static rtx subst (rtx, rtx, rtx, int, int, int);
442 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
443 static rtx simplify_if_then_else (rtx);
444 static rtx simplify_set (rtx);
445 static rtx simplify_logical (rtx);
446 static rtx expand_compound_operation (rtx);
447 static const_rtx expand_field_assignment (const_rtx);
448 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
449 			    rtx, unsigned HOST_WIDE_INT, int, int, int);
450 static rtx extract_left_shift (rtx, int);
451 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
452 			      unsigned HOST_WIDE_INT *);
453 static rtx canon_reg_for_combine (rtx, rtx);
454 static rtx force_to_mode (rtx, machine_mode,
455 			  unsigned HOST_WIDE_INT, int);
456 static rtx if_then_else_cond (rtx, rtx *, rtx *);
457 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
458 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
459 static rtx make_field_assignment (rtx);
460 static rtx apply_distributive_law (rtx);
461 static rtx distribute_and_simplify_rtx (rtx, int);
462 static rtx simplify_and_const_int_1 (machine_mode, rtx,
463 				     unsigned HOST_WIDE_INT);
464 static rtx simplify_and_const_int (rtx, machine_mode, rtx,
465 				   unsigned HOST_WIDE_INT);
466 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
467 			    HOST_WIDE_INT, machine_mode, int *);
468 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
469 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
470 				 int);
471 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
472 static rtx gen_lowpart_for_combine (machine_mode, rtx);
473 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
474 					     rtx, rtx *);
475 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
476 static void update_table_tick (rtx);
477 static void record_value_for_reg (rtx, rtx_insn *, rtx);
478 static void check_promoted_subreg (rtx_insn *, rtx);
479 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
480 static void record_dead_and_set_regs (rtx_insn *);
481 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
482 static rtx get_last_value (const_rtx);
483 static int use_crosses_set_p (const_rtx, int);
484 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
485 static int reg_dead_at_p (rtx, rtx_insn *);
486 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
487 static int reg_bitfield_target_p (rtx, rtx);
488 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
489 static void distribute_links (struct insn_link *);
490 static void mark_used_regs_combine (rtx);
491 static void record_promoted_value (rtx_insn *, rtx);
492 static bool unmentioned_reg_p (rtx, rtx);
493 static void record_truncated_values (rtx *, void *);
494 static bool reg_truncated_to_mode (machine_mode, const_rtx);
495 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
496 
497 
498 /* It is not safe to use ordinary gen_lowpart in combine.
499    See comments in gen_lowpart_for_combine.  */
500 #undef RTL_HOOKS_GEN_LOWPART
501 #define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine
502 
503 /* Our implementation of gen_lowpart never emits a new pseudo.  */
504 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
505 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine
506 
507 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
508 #define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine
509 
510 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
511 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine
512 
513 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
514 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode
515 
516 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
517 
518 
519 /* Convenience wrapper for the canonicalize_comparison target hook.
520    Target hooks cannot use enum rtx_code.  */
521 static inline void
522 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
523 				bool op0_preserve_value)
524 {
525   int code_int = (int)*code;
526   targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
527   *code = (enum rtx_code)code_int;
528 }
529 
530 /* Try to split PATTERN found in INSN.  This returns NULL_RTX if
531    PATTERN cannot be split.  Otherwise, it returns an insn sequence.
532    This is a wrapper around split_insns which ensures that the
533    reg_stat vector is made larger if the splitter creates a new
534    register.  */
535 
536 static rtx_insn *
537 combine_split_insns (rtx pattern, rtx_insn *insn)
538 {
539   rtx_insn *ret;
540   unsigned int nregs;
541 
542   ret = split_insns (pattern, insn);
543   nregs = max_reg_num ();
544   if (nregs > reg_stat.length ())
545     reg_stat.safe_grow_cleared (nregs);
546   return ret;
547 }
548 
549 /* This is used by find_single_use to locate an rtx in LOC that
550    contains exactly one use of DEST, which is typically either a REG
551    or CC0.  It returns a pointer to the innermost rtx expression
552    containing DEST.  Appearances of DEST that are being used to
553    totally replace it are not counted.  */
554 
555 static rtx *
556 find_single_use_1 (rtx dest, rtx *loc)
557 {
558   rtx x = *loc;
559   enum rtx_code code = GET_CODE (x);
560   rtx *result = NULL;
561   rtx *this_result;
562   int i;
563   const char *fmt;
564 
565   switch (code)
566     {
567     case CONST:
568     case LABEL_REF:
569     case SYMBOL_REF:
570     CASE_CONST_ANY:
571     case CLOBBER:
572       return 0;
573 
574     case SET:
575       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
576 	 of a REG that occupies all of the REG, the insn uses DEST if
577 	 it is mentioned in the destination or the source.  Otherwise, we
578 	 need just check the source.  */
579       if (GET_CODE (SET_DEST (x)) != CC0
580 	  && GET_CODE (SET_DEST (x)) != PC
581 	  && !REG_P (SET_DEST (x))
582 	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
583 		&& REG_P (SUBREG_REG (SET_DEST (x)))
584 		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
585 		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
586 		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
587 			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
588 	break;
589 
590       return find_single_use_1 (dest, &SET_SRC (x));
591 
592     case MEM:
593     case SUBREG:
594       return find_single_use_1 (dest, &XEXP (x, 0));
595 
596     default:
597       break;
598     }
599 
600   /* If it wasn't one of the common cases above, check each expression and
601      vector of this code.  Look for a unique usage of DEST.  */
602 
603   fmt = GET_RTX_FORMAT (code);
604   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
605     {
606       if (fmt[i] == 'e')
607 	{
608 	  if (dest == XEXP (x, i)
609 	      || (REG_P (dest) && REG_P (XEXP (x, i))
610 		  && REGNO (dest) == REGNO (XEXP (x, i))))
611 	    this_result = loc;
612 	  else
613 	    this_result = find_single_use_1 (dest, &XEXP (x, i));
614 
615 	  if (result == NULL)
616 	    result = this_result;
617 	  else if (this_result)
618 	    /* Duplicate usage.  */
619 	    return NULL;
620 	}
621       else if (fmt[i] == 'E')
622 	{
623 	  int j;
624 
625 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
626 	    {
627 	      if (XVECEXP (x, i, j) == dest
628 		  || (REG_P (dest)
629 		      && REG_P (XVECEXP (x, i, j))
630 		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
631 		this_result = loc;
632 	      else
633 		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
634 
635 	      if (result == NULL)
636 		result = this_result;
637 	      else if (this_result)
638 		return NULL;
639 	    }
640 	}
641     }
642 
643   return result;
644 }
645 
646 
647 /* See if DEST, produced in INSN, is used only a single time in the
648    sequel.  If so, return a pointer to the innermost rtx expression in which
649    it is used.
650 
651    If PLOC is nonzero, *PLOC is set to the insn containing the single use.
652 
653    If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
654    care about REG_DEAD notes or LOG_LINKS.
655 
656    Otherwise, we find the single use by finding an insn that has a
657    LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
658    only referenced once in that insn, we know that insn must be the first
659    and last insn referencing DEST.  */
660 
661 static rtx *
662 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
663 {
664   basic_block bb;
665   rtx_insn *next;
666   rtx *result;
667   struct insn_link *link;
668 
669   if (dest == cc0_rtx)
670     {
671       next = NEXT_INSN (insn);
672       if (next == 0
673 	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
674 	return 0;
675 
676       result = find_single_use_1 (dest, &PATTERN (next));
677       if (result && ploc)
678 	*ploc = next;
679       return result;
680     }
681 
682   if (!REG_P (dest))
683     return 0;
684 
685   bb = BLOCK_FOR_INSN (insn);
686   for (next = NEXT_INSN (insn);
687        next && BLOCK_FOR_INSN (next) == bb;
688        next = NEXT_INSN (next))
689     if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
690       {
691 	FOR_EACH_LOG_LINK (link, next)
692 	  if (link->insn == insn && link->regno == REGNO (dest))
693 	    break;
694 
695 	if (link)
696 	  {
697 	    result = find_single_use_1 (dest, &PATTERN (next));
698 	    if (ploc)
699 	      *ploc = next;
700 	    return result;
701 	  }
702       }
703 
704   return 0;
705 }
706 
707 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
708    insn.  The substitution can be undone by undo_all.  If INTO is already
709    set to NEWVAL, do not record this change.  Because computing NEWVAL might
710    also call SUBST, we have to compute it before we put anything into
711    the undo table.  */
712 
713 static void
714 do_SUBST (rtx *into, rtx newval)
715 {
716   struct undo *buf;
717   rtx oldval = *into;
718 
719   if (oldval == newval)
720     return;
721 
722   /* We'd like to catch as many invalid transformations here as
723      possible.  Unfortunately, there are way too many mode changes
724      that are perfectly valid, so we'd waste too much effort for
725      little gain doing the checks here.  Focus on catching invalid
726      transformations involving integer constants.  */
727   if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
728       && CONST_INT_P (newval))
729     {
730       /* Sanity check that we're replacing oldval with a CONST_INT
731 	 that is a valid sign-extension for the original mode.  */
732       gcc_assert (INTVAL (newval)
733 		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
734 
735       /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
736 	 CONST_INT is not valid, because after the replacement, the
737 	 original mode would be gone.  Unfortunately, we can't tell
738 	 when do_SUBST is called to replace the operand thereof, so we
739 	 perform this test on oldval instead, checking whether an
740 	 invalid replacement took place before we got here.  */
741       gcc_assert (!(GET_CODE (oldval) == SUBREG
742 		    && CONST_INT_P (SUBREG_REG (oldval))));
743       gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
744 		    && CONST_INT_P (XEXP (oldval, 0))));
745     }
746 
747   if (undobuf.frees)
748     buf = undobuf.frees, undobuf.frees = buf->next;
749   else
750     buf = XNEW (struct undo);
751 
752   buf->kind = UNDO_RTX;
753   buf->where.r = into;
754   buf->old_contents.r = oldval;
755   *into = newval;
756 
757   buf->next = undobuf.undos, undobuf.undos = buf;
758 }
759 
760 #define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
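
/* Illustrative sketch of how the SUBST machinery is used later in this file
   (PAT, NEW_SRC and NEW_NOTES are hypothetical): record a replacement, test
   the result, and roll everything back if it is not recognized.

     SUBST (SET_SRC (pat), new_src);
     if (recog_for_combine (&pat, subst_insn, &new_notes) < 0)
       undo_all ();

   undo_all walks undobuf.undos and restores each recorded *where.  */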
761 
762 /* Similar to SUBST, but NEWVAL is an int expression.  Note that it is not
763    safe to use this on a HOST_WIDE_INT value (including the value of a
764    CONST_INT), since only an int is recorded for the undo.  */
765 
766 static void
767 do_SUBST_INT (int *into, int newval)
768 {
769   struct undo *buf;
770   int oldval = *into;
771 
772   if (oldval == newval)
773     return;
774 
775   if (undobuf.frees)
776     buf = undobuf.frees, undobuf.frees = buf->next;
777   else
778     buf = XNEW (struct undo);
779 
780   buf->kind = UNDO_INT;
781   buf->where.i = into;
782   buf->old_contents.i = oldval;
783   *into = newval;
784 
785   buf->next = undobuf.undos, undobuf.undos = buf;
786 }
787 
788 #define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
789 
790 /* Similar to SUBST, but just substitute the mode.  This is used when
791    changing the mode of a pseudo-register, so that any other
792    references to the entry in the regno_reg_rtx array will change as
793    well.  */
794 
795 static void
796 do_SUBST_MODE (rtx *into, machine_mode newval)
797 {
798   struct undo *buf;
799   machine_mode oldval = GET_MODE (*into);
800 
801   if (oldval == newval)
802     return;
803 
804   if (undobuf.frees)
805     buf = undobuf.frees, undobuf.frees = buf->next;
806   else
807     buf = XNEW (struct undo);
808 
809   buf->kind = UNDO_MODE;
810   buf->where.r = into;
811   buf->old_contents.m = oldval;
812   adjust_reg_mode (*into, newval);
813 
814   buf->next = undobuf.undos, undobuf.undos = buf;
815 }
816 
817 #define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
818 
819 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */
820 
821 static void
822 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
823 {
824   struct undo *buf;
825   struct insn_link * oldval = *into;
826 
827   if (oldval == newval)
828     return;
829 
830   if (undobuf.frees)
831     buf = undobuf.frees, undobuf.frees = buf->next;
832   else
833     buf = XNEW (struct undo);
834 
835   buf->kind = UNDO_LINKS;
836   buf->where.l = into;
837   buf->old_contents.l = oldval;
838   *into = newval;
839 
840   buf->next = undobuf.undos, undobuf.undos = buf;
841 }
842 
843 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
844 
845 /* Subroutine of try_combine.  Determine whether the replacement patterns
846    NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
847    than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
848    that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
849    undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
850    of all the instructions can be estimated and the replacements are more
851    expensive than the original sequence.  */
852 
853 static bool
854 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
855 		       rtx newpat, rtx newi2pat, rtx newotherpat)
856 {
857   int i0_cost, i1_cost, i2_cost, i3_cost;
858   int new_i2_cost, new_i3_cost;
859   int old_cost, new_cost;
860 
861   /* Lookup the original insn_rtx_costs.  */
862   i2_cost = INSN_COST (i2);
863   i3_cost = INSN_COST (i3);
864 
865   if (i1)
866     {
867       i1_cost = INSN_COST (i1);
868       if (i0)
869 	{
870 	  i0_cost = INSN_COST (i0);
871 	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
872 		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
873 	}
874       else
875 	{
876 	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
877 		      ? i1_cost + i2_cost + i3_cost : 0);
878 	  i0_cost = 0;
879 	}
880     }
881   else
882     {
883       old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
884       i1_cost = i0_cost = 0;
885     }
886 
887   /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
888      correct that.  */
889   if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
890     old_cost -= i1_cost;
891 
892 
893   /* Calculate the replacement insn_rtx_costs.  */
894   new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
895   if (newi2pat)
896     {
897       new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
898       new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
899 		 ? new_i2_cost + new_i3_cost : 0;
900     }
901   else
902     {
903       new_cost = new_i3_cost;
904       new_i2_cost = 0;
905     }
906 
907   if (undobuf.other_insn)
908     {
909       int old_other_cost, new_other_cost;
910 
911       old_other_cost = INSN_COST (undobuf.other_insn);
912       new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
913       if (old_other_cost > 0 && new_other_cost > 0)
914 	{
915 	  old_cost += old_other_cost;
916 	  new_cost += new_other_cost;
917 	}
918       else
919 	old_cost = 0;
920     }
921 
922   /* Disallow this combination if both new_cost and old_cost are greater than
923      zero, and new_cost is greater than old_cost.  */
924   int reject = old_cost > 0 && new_cost > old_cost;
925 
926   if (dump_file)
927     {
928       fprintf (dump_file, "%s combination of insns ",
929 	       reject ? "rejecting" : "allowing");
930       if (i0)
931 	fprintf (dump_file, "%d, ", INSN_UID (i0));
932       if (i1 && INSN_UID (i1) != INSN_UID (i2))
933 	fprintf (dump_file, "%d, ", INSN_UID (i1));
934       fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
935 
936       fprintf (dump_file, "original costs ");
937       if (i0)
938 	fprintf (dump_file, "%d + ", i0_cost);
939       if (i1 && INSN_UID (i1) != INSN_UID (i2))
940 	fprintf (dump_file, "%d + ", i1_cost);
941       fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
942 
943       if (newi2pat)
944 	fprintf (dump_file, "replacement costs %d + %d = %d\n",
945 		 new_i2_cost, new_i3_cost, new_cost);
946       else
947 	fprintf (dump_file, "replacement cost %d\n", new_cost);
948     }
949 
950   if (reject)
951     return false;
952 
953   /* Update the uid_insn_cost array with the replacement costs.  */
954   INSN_COST (i2) = new_i2_cost;
955   INSN_COST (i3) = new_i3_cost;
956   if (i1)
957     {
958       INSN_COST (i1) = 0;
959       if (i0)
960 	INSN_COST (i0) = 0;
961     }
962 
963   return true;
964 }
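/* Worked example with hypothetical costs: combining I2 (cost 4) and I3
   (cost 8) gives old_cost = 12.  With no NEWI2PAT, a NEWPAT whose
   insn_rtx_cost is 16 makes new_cost = 16 > 12, so the combination is
   rejected; a NEWPAT costing 8 is allowed, after which INSN_COST (i3)
   becomes 8 and INSN_COST (i2) becomes 0.  */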
965 
966 
967 /* Delete any insns that copy a register to itself.  */
968 
969 static void
970 delete_noop_moves (void)
971 {
972   rtx_insn *insn, *next;
973   basic_block bb;
974 
975   FOR_EACH_BB_FN (bb, cfun)
976     {
977       for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
978 	{
979 	  next = NEXT_INSN (insn);
980 	  if (INSN_P (insn) && noop_move_p (insn))
981 	    {
982 	      if (dump_file)
983 		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
984 
985 	      delete_insn_and_edges (insn);
986 	    }
987 	}
988     }
989 }
990 
991 
992 /* Return false if we do not want to (or cannot) combine DEF.  */
993 static bool
994 can_combine_def_p (df_ref def)
995 {
996   /* Do not consider the def if it is a pre/post-modify address inside a MEM.  */
997   if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
998     return false;
999 
1000   unsigned int regno = DF_REF_REGNO (def);
1001 
1002   /* Do not combine frame pointer adjustments.  */
1003   if ((regno == FRAME_POINTER_REGNUM
1004        && (!reload_completed || frame_pointer_needed))
1005       || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1006 	  && regno == HARD_FRAME_POINTER_REGNUM
1007 	  && (!reload_completed || frame_pointer_needed))
1008       || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1009 	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1010     return false;
1011 
1012   return true;
1013 }
1014 
1015 /* Return false if we do not want to (or cannot) combine USE.  */
1016 static bool
1017 can_combine_use_p (df_ref use)
1018 {
1019   /* Do not consider the use of the stack pointer by a function call.  */
1020   if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1021     return false;
1022 
1023   return true;
1024 }
1025 
1026 /* Fill in log links field for all insns.  */
1027 
1028 static void
1029 create_log_links (void)
1030 {
1031   basic_block bb;
1032   rtx_insn **next_use;
1033   rtx_insn *insn;
1034   df_ref def, use;
1035 
1036   next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1037 
1038   /* Pass through each block from the end, recording the uses of each
1039      register and establishing log links when def is encountered.
1040      Note that we do not clear next_use array in order to save time,
1041      so we have to test whether the use is in the same basic block as def.
1042 
1043      There are a few cases below when we do not consider the definition or
1044      usage -- these follow what the original flow.c did.  Don't ask me why
1045      it is done this way; I don't know and if it works, I don't want to know.  */
1046 
1047   FOR_EACH_BB_FN (bb, cfun)
1048     {
1049       FOR_BB_INSNS_REVERSE (bb, insn)
1050         {
1051           if (!NONDEBUG_INSN_P (insn))
1052             continue;
1053 
1054 	  /* Log links are created only once.  */
1055 	  gcc_assert (!LOG_LINKS (insn));
1056 
1057 	  FOR_EACH_INSN_DEF (def, insn)
1058             {
1059               unsigned int regno = DF_REF_REGNO (def);
1060               rtx_insn *use_insn;
1061 
1062               if (!next_use[regno])
1063                 continue;
1064 
1065 	      if (!can_combine_def_p (def))
1066 		continue;
1067 
1068 	      use_insn = next_use[regno];
1069 	      next_use[regno] = NULL;
1070 
1071 	      if (BLOCK_FOR_INSN (use_insn) != bb)
1072 		continue;
1073 
1074 	      /* flow.c claimed:
1075 
1076 		 We don't build a LOG_LINK for hard registers contained
1077 		 in ASM_OPERANDs.  If these registers get replaced,
1078 		 we might wind up changing the semantics of the insn,
1079 		 even if reload can make what appear to be valid
1080 		 assignments later.  */
1081 	      if (regno < FIRST_PSEUDO_REGISTER
1082 		  && asm_noperands (PATTERN (use_insn)) >= 0)
1083 		continue;
1084 
1085 	      /* Don't add duplicate links between instructions.  */
1086 	      struct insn_link *links;
1087 	      FOR_EACH_LOG_LINK (links, use_insn)
1088 	        if (insn == links->insn && regno == links->regno)
1089 		  break;
1090 
1091 	      if (!links)
1092 		LOG_LINKS (use_insn)
1093 		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1094             }
1095 
1096 	  FOR_EACH_INSN_USE (use, insn)
1097 	    if (can_combine_use_p (use))
1098 	      next_use[DF_REF_REGNO (use)] = insn;
1099         }
1100     }
1101 
1102   free (next_use);
1103 }
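/* Example of the reverse walk above (hypothetical insns A and B in the same
   basic block, where A sets pseudo 100 and a later insn B uses it): scanning
   backwards we reach B first and record next_use[100] = B; when we then
   reach A's definition, the link (A, 100) is prepended to B's LOG_LINKS and
   next_use[100] is cleared, so a still-earlier set of pseudo 100 is not also
   linked to B.  */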
1104 
1105 /* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
1106    true if we found a LOG_LINK that proves that A feeds B.  This only works
1107    if there are no instructions between A and B which could have a link
1108    depending on A, since in that case we would not record a link for B.
1109    We also check the implicit dependency created by a cc0 setter/user
1110    pair.  */
1111 
1112 static bool
1113 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1114 {
1115   struct insn_link *links;
1116   FOR_EACH_LOG_LINK (links, b)
1117     if (links->insn == a)
1118       return true;
1119   if (HAVE_cc0 && sets_cc0_p (a))
1120     return true;
1121   return false;
1122 }
1123 
1124 /* Main entry point for combiner.  F is the first insn of the function.
1125    NREGS is the first unused pseudo-reg number.
1126 
1127    Return nonzero if the combiner has turned an indirect jump
1128    instruction into a direct jump.  */
1129 static int
1130 combine_instructions (rtx_insn *f, unsigned int nregs)
1131 {
1132   rtx_insn *insn, *next;
1133   rtx_insn *prev;
1134   struct insn_link *links, *nextlinks;
1135   rtx_insn *first;
1136   basic_block last_bb;
1137 
1138   int new_direct_jump_p = 0;
1139 
1140   for (first = f; first && !NONDEBUG_INSN_P (first); )
1141     first = NEXT_INSN (first);
1142   if (!first)
1143     return 0;
1144 
1145   combine_attempts = 0;
1146   combine_merges = 0;
1147   combine_extras = 0;
1148   combine_successes = 0;
1149 
1150   rtl_hooks = combine_rtl_hooks;
1151 
1152   reg_stat.safe_grow_cleared (nregs);
1153 
1154   init_recog_no_volatile ();
1155 
1156   /* Allocate array for insn info.  */
1157   max_uid_known = get_max_uid ();
1158   uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1159   uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1160   gcc_obstack_init (&insn_link_obstack);
1161 
1162   nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1163 
1164   /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
1165      problems when, for example, we have j <<= 1 in a loop.  */
1166 
1167   nonzero_sign_valid = 0;
1168   label_tick = label_tick_ebb_start = 1;
1169 
1170   /* Scan all SETs and see if we can deduce anything about what
1171      bits are known to be zero for some registers and how many copies
1172      of the sign bit are known to exist for those registers.
1173 
1174      Also set any known values so that we can use it while searching
1175      for what bits are known to be set.  */
1176 
1177   setup_incoming_promotions (first);
1178   /* Allow the entry block and the first block to fall into the same EBB.
1179      Conceptually the incoming promotions are assigned to the entry block.  */
1180   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1181 
1182   create_log_links ();
1183   FOR_EACH_BB_FN (this_basic_block, cfun)
1184     {
1185       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1186       last_call_luid = 0;
1187       mem_last_set = -1;
1188 
1189       label_tick++;
1190       if (!single_pred_p (this_basic_block)
1191 	  || single_pred (this_basic_block) != last_bb)
1192 	label_tick_ebb_start = label_tick;
1193       last_bb = this_basic_block;
1194 
1195       FOR_BB_INSNS (this_basic_block, insn)
1196         if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1197 	  {
1198             rtx links;
1199 
1200             subst_low_luid = DF_INSN_LUID (insn);
1201             subst_insn = insn;
1202 
1203 	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1204 		         insn);
1205 	    record_dead_and_set_regs (insn);
1206 
1207 	    if (AUTO_INC_DEC)
1208 	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1209 		if (REG_NOTE_KIND (links) == REG_INC)
1210 		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1211 						    insn);
1212 
1213 	    /* Record the current insn_rtx_cost of this instruction.  */
1214 	    if (NONJUMP_INSN_P (insn))
1215 	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1216 	      					optimize_this_for_speed_p);
1217 	    if (dump_file)
1218 	      fprintf (dump_file, "insn_cost %d: %d\n",
1219 		       INSN_UID (insn), INSN_COST (insn));
1220 	  }
1221     }
1222 
1223   nonzero_sign_valid = 1;
1224 
1225   /* Now scan all the insns in forward order.  */
1226   label_tick = label_tick_ebb_start = 1;
1227   init_reg_last ();
1228   setup_incoming_promotions (first);
1229   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1230   int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1231 
1232   FOR_EACH_BB_FN (this_basic_block, cfun)
1233     {
1234       rtx_insn *last_combined_insn = NULL;
1235       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1236       last_call_luid = 0;
1237       mem_last_set = -1;
1238 
1239       label_tick++;
1240       if (!single_pred_p (this_basic_block)
1241 	  || single_pred (this_basic_block) != last_bb)
1242 	label_tick_ebb_start = label_tick;
1243       last_bb = this_basic_block;
1244 
1245       rtl_profile_for_bb (this_basic_block);
1246       for (insn = BB_HEAD (this_basic_block);
1247 	   insn != NEXT_INSN (BB_END (this_basic_block));
1248 	   insn = next ? next : NEXT_INSN (insn))
1249 	{
1250 	  next = 0;
1251 	  if (!NONDEBUG_INSN_P (insn))
1252 	    continue;
1253 
1254 	  while (last_combined_insn
1255 		 && last_combined_insn->deleted ())
1256 	    last_combined_insn = PREV_INSN (last_combined_insn);
1257 	  if (last_combined_insn == NULL_RTX
1258 	      || BARRIER_P (last_combined_insn)
1259 	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1260 	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1261 	    last_combined_insn = insn;
1262 
1263 	  /* See if we know about function return values before this
1264 	     insn based upon SUBREG flags.  */
1265 	  check_promoted_subreg (insn, PATTERN (insn));
1266 
1267 	  /* See if we can find hard regs and subregs of pseudos in
1268 	     narrower modes.  This could help turn TRUNCATEs
1269 	     into SUBREGs.  */
1270 	  note_uses (&PATTERN (insn), record_truncated_values, NULL);
1271 
1272 	  /* Try this insn with each insn it links back to.  */
1273 
1274 	  FOR_EACH_LOG_LINK (links, insn)
1275 	    if ((next = try_combine (insn, links->insn, NULL,
1276 				     NULL, &new_direct_jump_p,
1277 				     last_combined_insn)) != 0)
1278 	      {
1279 		statistics_counter_event (cfun, "two-insn combine", 1);
1280 		goto retry;
1281 	      }
1282 
1283 	  /* Try each sequence of three linked insns ending with this one.  */
1284 
1285 	  if (max_combine >= 3)
1286 	    FOR_EACH_LOG_LINK (links, insn)
1287 	      {
1288 		rtx_insn *link = links->insn;
1289 
1290 		/* If the linked insn has been replaced by a note, then there
1291 		   is no point in pursuing this chain any further.  */
1292 		if (NOTE_P (link))
1293 		  continue;
1294 
1295 		FOR_EACH_LOG_LINK (nextlinks, link)
1296 		  if ((next = try_combine (insn, link, nextlinks->insn,
1297 					   NULL, &new_direct_jump_p,
1298 					   last_combined_insn)) != 0)
1299 		    {
1300 		      statistics_counter_event (cfun, "three-insn combine", 1);
1301 		      goto retry;
1302 		    }
1303 	      }
1304 
1305 	  /* Try to combine a jump insn that uses CC0
1306 	     with a preceding insn that sets CC0, and maybe with its
1307 	     logical predecessor as well.
1308 	     This is how we make decrement-and-branch insns.
1309 	     We need this special code because data flow connections
1310 	     via CC0 do not get entered in LOG_LINKS.  */
1311 
1312 	  if (HAVE_cc0
1313 	      && JUMP_P (insn)
1314 	      && (prev = prev_nonnote_insn (insn)) != 0
1315 	      && NONJUMP_INSN_P (prev)
1316 	      && sets_cc0_p (PATTERN (prev)))
1317 	    {
1318 	      if ((next = try_combine (insn, prev, NULL, NULL,
1319 				       &new_direct_jump_p,
1320 				       last_combined_insn)) != 0)
1321 		goto retry;
1322 
1323 	      FOR_EACH_LOG_LINK (nextlinks, prev)
1324 		  if ((next = try_combine (insn, prev, nextlinks->insn,
1325 					   NULL, &new_direct_jump_p,
1326 					   last_combined_insn)) != 0)
1327 		    goto retry;
1328 	    }
1329 
1330 	  /* Do the same for an insn that explicitly references CC0.  */
1331 	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1332 	      && (prev = prev_nonnote_insn (insn)) != 0
1333 	      && NONJUMP_INSN_P (prev)
1334 	      && sets_cc0_p (PATTERN (prev))
1335 	      && GET_CODE (PATTERN (insn)) == SET
1336 	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1337 	    {
1338 	      if ((next = try_combine (insn, prev, NULL, NULL,
1339 				       &new_direct_jump_p,
1340 				       last_combined_insn)) != 0)
1341 		goto retry;
1342 
1343 	      FOR_EACH_LOG_LINK (nextlinks, prev)
1344 		  if ((next = try_combine (insn, prev, nextlinks->insn,
1345 					   NULL, &new_direct_jump_p,
1346 					   last_combined_insn)) != 0)
1347 		    goto retry;
1348 	    }
1349 
1350 	  /* Finally, see if any of the insns that this insn links to
1351 	     explicitly references CC0.  If so, try this insn, that insn,
1352 	     and its predecessor if it sets CC0.  */
1353 	  if (HAVE_cc0)
1354 	    {
1355 	      FOR_EACH_LOG_LINK (links, insn)
1356 		if (NONJUMP_INSN_P (links->insn)
1357 		    && GET_CODE (PATTERN (links->insn)) == SET
1358 		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1359 		    && (prev = prev_nonnote_insn (links->insn)) != 0
1360 		    && NONJUMP_INSN_P (prev)
1361 		    && sets_cc0_p (PATTERN (prev))
1362 		    && (next = try_combine (insn, links->insn,
1363 					    prev, NULL, &new_direct_jump_p,
1364 					    last_combined_insn)) != 0)
1365 		  goto retry;
1366 	    }
1367 
1368 	  /* Try combining an insn with two different insns whose results it
1369 	     uses.  */
1370 	  if (max_combine >= 3)
1371 	    FOR_EACH_LOG_LINK (links, insn)
1372 	      for (nextlinks = links->next; nextlinks;
1373 		   nextlinks = nextlinks->next)
1374 		if ((next = try_combine (insn, links->insn,
1375 					 nextlinks->insn, NULL,
1376 					 &new_direct_jump_p,
1377 					 last_combined_insn)) != 0)
1378 
1379 		  {
1380 		    statistics_counter_event (cfun, "three-insn combine", 1);
1381 		    goto retry;
1382 		  }
1383 
1384 	  /* Try four-instruction combinations.  */
1385 	  if (max_combine >= 4)
1386 	    FOR_EACH_LOG_LINK (links, insn)
1387 	      {
1388 		struct insn_link *next1;
1389 		rtx_insn *link = links->insn;
1390 
1391 		/* If the linked insn has been replaced by a note, then there
1392 		   is no point in pursuing this chain any further.  */
1393 		if (NOTE_P (link))
1394 		  continue;
1395 
1396 		FOR_EACH_LOG_LINK (next1, link)
1397 		  {
1398 		    rtx_insn *link1 = next1->insn;
1399 		    if (NOTE_P (link1))
1400 		      continue;
1401 		    /* I0 -> I1 -> I2 -> I3.  */
1402 		    FOR_EACH_LOG_LINK (nextlinks, link1)
1403 		      if ((next = try_combine (insn, link, link1,
1404 					       nextlinks->insn,
1405 					       &new_direct_jump_p,
1406 					       last_combined_insn)) != 0)
1407 			{
1408 			  statistics_counter_event (cfun, "four-insn combine", 1);
1409 			  goto retry;
1410 			}
1411 		    /* I0, I1 -> I2, I2 -> I3.  */
1412 		    for (nextlinks = next1->next; nextlinks;
1413 			 nextlinks = nextlinks->next)
1414 		      if ((next = try_combine (insn, link, link1,
1415 					       nextlinks->insn,
1416 					       &new_direct_jump_p,
1417 					       last_combined_insn)) != 0)
1418 			{
1419 			  statistics_counter_event (cfun, "four-insn combine", 1);
1420 			  goto retry;
1421 			}
1422 		  }
1423 
1424 		for (next1 = links->next; next1; next1 = next1->next)
1425 		  {
1426 		    rtx_insn *link1 = next1->insn;
1427 		    if (NOTE_P (link1))
1428 		      continue;
1429 		    /* I0 -> I2; I1, I2 -> I3.  */
1430 		    FOR_EACH_LOG_LINK (nextlinks, link)
1431 		      if ((next = try_combine (insn, link, link1,
1432 					       nextlinks->insn,
1433 					       &new_direct_jump_p,
1434 					       last_combined_insn)) != 0)
1435 			{
1436 			  statistics_counter_event (cfun, "four-insn combine", 1);
1437 			  goto retry;
1438 			}
1439 		    /* I0 -> I1; I1, I2 -> I3.  */
1440 		    FOR_EACH_LOG_LINK (nextlinks, link1)
1441 		      if ((next = try_combine (insn, link, link1,
1442 					       nextlinks->insn,
1443 					       &new_direct_jump_p,
1444 					       last_combined_insn)) != 0)
1445 			{
1446 			  statistics_counter_event (cfun, "four-insn combine", 1);
1447 			  goto retry;
1448 			}
1449 		  }
1450 	      }
1451 
1452 	  /* Try this insn with each REG_EQUAL note it links back to.  */
1453 	  FOR_EACH_LOG_LINK (links, insn)
1454 	    {
1455 	      rtx set, note;
1456 	      rtx_insn *temp = links->insn;
1457 	      if ((set = single_set (temp)) != 0
1458 		  && (note = find_reg_equal_equiv_note (temp)) != 0
1459 		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1460 		  /* Avoid using a register that may already have been marked
1461 		     dead by an earlier instruction.  */
1462 		  && ! unmentioned_reg_p (note, SET_SRC (set))
1463 		  && (GET_MODE (note) == VOIDmode
1464 		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1465 		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1466 			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1467 			     || (GET_MODE (XEXP (SET_DEST (set), 0))
1468 				 == GET_MODE (note))))))
1469 		{
1470 		  /* Temporarily replace the set's source with the
1471 		     contents of the REG_EQUAL note.  The insn will
1472 		     be deleted or recognized by try_combine.  */
1473 		  rtx orig_src = SET_SRC (set);
1474 		  rtx orig_dest = SET_DEST (set);
1475 		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1476 		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
1477 		  SET_SRC (set) = note;
1478 		  i2mod = temp;
1479 		  i2mod_old_rhs = copy_rtx (orig_src);
1480 		  i2mod_new_rhs = copy_rtx (note);
1481 		  next = try_combine (insn, i2mod, NULL, NULL,
1482 				      &new_direct_jump_p,
1483 				      last_combined_insn);
1484 		  i2mod = NULL;
1485 		  if (next)
1486 		    {
1487 		      statistics_counter_event (cfun, "insn-with-note combine", 1);
1488 		      goto retry;
1489 		    }
1490 		  SET_SRC (set) = orig_src;
1491 		  SET_DEST (set) = orig_dest;
1492 		}
1493 	    }
1494 
1495 	  if (!NOTE_P (insn))
1496 	    record_dead_and_set_regs (insn);
1497 
1498 retry:
1499 	  ;
1500 	}
1501     }
1502 
1503   default_rtl_profile ();
1504   clear_bb_flags ();
1505   new_direct_jump_p |= purge_all_dead_edges ();
1506   delete_noop_moves ();
1507 
1508   /* Clean up.  */
1509   obstack_free (&insn_link_obstack, NULL);
1510   free (uid_log_links);
1511   free (uid_insn_cost);
1512   reg_stat.release ();
1513 
1514   {
1515     struct undo *undo, *next;
1516     for (undo = undobuf.frees; undo; undo = next)
1517       {
1518 	next = undo->next;
1519 	free (undo);
1520       }
1521     undobuf.frees = 0;
1522   }
1523 
1524   total_attempts += combine_attempts;
1525   total_merges += combine_merges;
1526   total_extras += combine_extras;
1527   total_successes += combine_successes;
1528 
1529   nonzero_sign_valid = 0;
1530   rtl_hooks = general_rtl_hooks;
1531 
1532   /* Make recognizer allow volatile MEMs again.  */
1533   init_recog ();
1534 
1535   return new_direct_jump_p;
1536 }
1537 
1538 /* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */
1539 
1540 static void
1541 init_reg_last (void)
1542 {
1543   unsigned int i;
1544   reg_stat_type *p;
1545 
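  /* Only the fields preceding sign_bit_copies are cleared; sign_bit_copies,
     nonzero_bits and the truncation data gathered by the first scan in
     combine_instructions are kept for the second scan.  */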
1546   FOR_EACH_VEC_ELT (reg_stat, i, p)
1547     memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1548 }
1549 
1550 /* Set up any promoted values for incoming argument registers.  */
1551 
1552 static void
1553 setup_incoming_promotions (rtx_insn *first)
1554 {
1555   tree arg;
1556   bool strictly_local = false;
1557 
1558   for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1559        arg = DECL_CHAIN (arg))
1560     {
1561       rtx x, reg = DECL_INCOMING_RTL (arg);
1562       int uns1, uns3;
1563       machine_mode mode1, mode2, mode3, mode4;
1564 
1565       /* Only continue if the incoming argument is in a register.  */
1566       if (!REG_P (reg))
1567 	continue;
1568 
1569       /* Determine, if possible, whether all call sites of the current
1570          function lie within the current compilation unit.  (This does
1571 	 take into account the exporting of a function via taking its
1572 	 address, and so forth.)  */
1573       strictly_local = cgraph_node::local_info (current_function_decl)->local;
1574 
1575       /* The mode and signedness of the argument before any promotions happen
1576          (equal to the mode of the pseudo holding it at that stage).  */
1577       mode1 = TYPE_MODE (TREE_TYPE (arg));
1578       uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1579 
1580       /* The mode and signedness of the argument after any source language and
1581          TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
1582       mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1583       uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1584 
1585       /* The mode and signedness of the argument as it is actually passed,
1586          see assign_parm_setup_reg in function.c.  */
1587       mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1588 				     TREE_TYPE (cfun->decl), 0);
1589 
1590       /* The mode of the register in which the argument is being passed.  */
1591       mode4 = GET_MODE (reg);
1592 
1593       /* Eliminate sign extensions in the callee when:
1594 	 (a) A mode promotion has occurred;  */
1595       if (mode1 == mode3)
1596 	continue;
1597       /* (b) The mode of the register is the same as the mode of
1598 	     the argument as it is passed; */
1599       if (mode3 != mode4)
1600 	continue;
1601       /* (c) There's no language level extension;  */
1602       if (mode1 == mode2)
1603 	;
1604       /* (c.1) All callers are from the current compilation unit.  If that's
1605 	 the case we don't have to rely on an ABI, we only have to know
1606 	 what we're generating right now, and we know that we will do the
1607 	 mode1 to mode2 promotion with the given sign.  */
1608       else if (!strictly_local)
1609 	continue;
1610       /* (c.2) The combination of the two promotions is useful.  This is
1611 	 true when the signs match, or if the first promotion is unsigned.
1612 	 In the latter case, (sign_extend (zero_extend x)) is the same as
1613 	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
1614       else if (uns1)
1615 	uns3 = true;
1616       else if (uns3)
1617 	continue;
1618 
1619       /* Record that the value was promoted from mode1 to mode3,
1620 	 so that any sign extension at the head of the current
1621 	 function may be eliminated.  */
1622       x = gen_rtx_CLOBBER (mode1, const0_rtx);
1623       x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
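      /* For instance (illustrative modes; the real ones depend on the target
	 ABI), a QImode argument promoted to SImode by zero extension is
	 recorded as (zero_extend:SI (clobber:QI (const_int 0))), i.e. "some
	 unknown QImode value, zero-extended to SImode".  */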
1624       record_value_for_reg (reg, first, x);
1625     }
1626 }
1627 
1628 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1629    that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1630    because some machines (maybe most) will actually do the sign-extension and
1631    this is the conservative approach.
1632 
1633    ??? For 2.5, try to tighten up the MD files in this regard instead of this
1634    kludge.  */
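/* As an illustration (hypothetical values): with MODE == QImode and
   PREC == 32, (const_int 192) has the QImode sign bit (0x80) set, so it is
   rewritten as (const_int -64), i.e. 192 | ~0xff, which is the value such a
   machine would actually load.  */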
1635 
1636 static rtx
1637 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1638 {
1639   if (GET_MODE_PRECISION (mode) < prec
1640       && CONST_INT_P (src)
1641       && INTVAL (src) > 0
1642       && val_signbit_known_set_p (mode, INTVAL (src)))
1643     src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (mode));
1644 
1645   return src;
1646 }
1647 
1648 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1649    and SET.  */
1650 
1651 static void
1652 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1653 			   rtx x)
1654 {
1655   rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1656   unsigned HOST_WIDE_INT bits = 0;
1657   rtx reg_equal = NULL, src = SET_SRC (set);
1658   unsigned int num = 0;
1659 
1660   if (reg_equal_note)
1661     reg_equal = XEXP (reg_equal_note, 0);
1662 
1663   if (SHORT_IMMEDIATES_SIGN_EXTEND)
1664     {
1665       src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1666       if (reg_equal)
1667 	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1668     }
1669 
1670   /* Don't call nonzero_bits if it cannot change anything.  */
1671   if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1672     {
1673       bits = nonzero_bits (src, nonzero_bits_mode);
1674       if (reg_equal && bits)
1675 	bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1676       rsp->nonzero_bits |= bits;
1677     }
1678 
1679   /* Don't call num_sign_bit_copies if it cannot change anything.  */
1680   if (rsp->sign_bit_copies != 1)
1681     {
1682       num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1683       if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1684 	{
1685 	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1686 	  if (num == 0 || numeq > num)
1687 	    num = numeq;
1688 	}
1689       if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1690 	rsp->sign_bit_copies = num;
1691     }
1692 }
1693 
1694 /* Called via note_stores.  If X is a pseudo that is narrower than
1695    HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1696 
1697    If we are setting only a portion of X and we can't figure out what
1698    portion, assume all bits will be used since we don't know what will
1699    be happening.
1700 
1701    Similarly, set how many bits of X are known to be copies of the sign bit
1702    at all locations in the function.  This is the smallest number implied
1703    by any set of X.  */
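/* For example (illustrative), after (set (reg:SI 100) (const_int 6)) the
   nonzero_bits mask recorded for register 100 is 6, so later queries know
   that only bits 1 and 2 can be nonzero, unless another SET of the register
   ORs more bits into the mask.  */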
1704 
1705 static void
1706 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1707 {
1708   rtx_insn *insn = (rtx_insn *) data;
1709 
1710   if (REG_P (x)
1711       && REGNO (x) >= FIRST_PSEUDO_REGISTER
1712       /* If this register is undefined at the start of the function, we can't
1713 	 say what its contents were.  */
1714       && ! REGNO_REG_SET_P
1715 	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1716       && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1717     {
1718       reg_stat_type *rsp = &reg_stat[REGNO (x)];
1719 
1720       if (set == 0 || GET_CODE (set) == CLOBBER)
1721 	{
1722 	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1723 	  rsp->sign_bit_copies = 1;
1724 	  return;
1725 	}
1726 
1727       /* If this register is being initialized using itself, and the
1728 	 register is uninitialized in this basic block, and there are
1729 	 no LOG_LINKS which set the register, then part of the
1730 	 register is uninitialized.  In that case we can't assume
1731 	 anything about the number of nonzero bits.
1732 
1733 	 ??? We could do better if we checked this in
1734 	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
1735 	 could avoid making assumptions about the insn which initially
1736 	 sets the register, while still using the information in other
1737 	 insns.  We would have to be careful to check every insn
1738 	 involved in the combination.  */
1739 
1740       if (insn
1741 	  && reg_referenced_p (x, PATTERN (insn))
1742 	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1743 			       REGNO (x)))
1744 	{
1745 	  struct insn_link *link;
1746 
1747 	  FOR_EACH_LOG_LINK (link, insn)
1748 	    if (dead_or_set_p (link->insn, x))
1749 	      break;
1750 	  if (!link)
1751 	    {
1752 	      rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1753 	      rsp->sign_bit_copies = 1;
1754 	      return;
1755 	    }
1756 	}
1757 
1758       /* If this is a complex assignment, see if we can convert it into a
1759 	 simple assignment.  */
1760       set = expand_field_assignment (set);
1761 
1762       /* If this is a simple assignment, or we have a paradoxical SUBREG,
1763 	 set what we know about X.  */
1764 
1765       if (SET_DEST (set) == x
1766 	  || (paradoxical_subreg_p (SET_DEST (set))
1767 	      && SUBREG_REG (SET_DEST (set)) == x))
1768 	update_rsp_from_reg_equal (rsp, insn, set, x);
1769       else
1770 	{
1771 	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1772 	  rsp->sign_bit_copies = 1;
1773 	}
1774     }
1775 }
1776 
1777 /* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
1778    optionally insns that were previously combined into I3 or that will be
1779    combined into the merger of INSN and I3.  The order is PRED, PRED2,
1780    INSN, SUCC, SUCC2, I3.
1781 
1782    Return 0 if the combination is not allowed for any reason.
1783 
1784    If the combination is allowed, *PDEST will be set to the single
1785    destination of INSN and *PSRC to the single source, and this function
1786    will return 1.  */
1787 
1788 static int
1789 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1790 	       rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1791 	       rtx *pdest, rtx *psrc)
1792 {
1793   int i;
1794   const_rtx set = 0;
1795   rtx src, dest;
1796   rtx_insn *p;
1797   rtx link;
1798   bool all_adjacent = true;
1799   int (*is_volatile_p) (const_rtx);
1800 
1801   if (succ)
1802     {
1803       if (succ2)
1804 	{
1805 	  if (next_active_insn (succ2) != i3)
1806 	    all_adjacent = false;
1807 	  if (next_active_insn (succ) != succ2)
1808 	    all_adjacent = false;
1809 	}
1810       else if (next_active_insn (succ) != i3)
1811 	all_adjacent = false;
1812       if (next_active_insn (insn) != succ)
1813 	all_adjacent = false;
1814     }
1815   else if (next_active_insn (insn) != i3)
1816     all_adjacent = false;
1817 
1818   /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1819      or a PARALLEL consisting of such a SET and CLOBBERs.
1820 
1821      If INSN has CLOBBER parallel parts, ignore them for our processing.
1822      By definition, these happen during the execution of the insn.  When it
1823      is merged with another insn, all bets are off.  If they are, in fact,
1824      needed and aren't also supplied in I3, they may be added by
1825      recog_for_combine.  Otherwise, it won't match.
1826 
1827      We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1828      note.
1829 
1830      Get the source and destination of INSN.  If more than one, can't
1831      combine.  */
1832 
1833   if (GET_CODE (PATTERN (insn)) == SET)
1834     set = PATTERN (insn);
1835   else if (GET_CODE (PATTERN (insn)) == PARALLEL
1836 	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1837     {
1838       for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1839 	{
1840 	  rtx elt = XVECEXP (PATTERN (insn), 0, i);
1841 
1842 	  switch (GET_CODE (elt))
1843 	    {
1844 	    /* This is important to combine floating point insns
1845 	       for the SH4 port.  */
1846 	    case USE:
1847 	      /* Combining an isolated USE doesn't make sense.
1848 		 We depend here on combinable_i3pat to reject them.  */
1849 	      /* The code below this loop only verifies that the inputs of
1850 		 the SET in INSN do not change.  We call reg_set_between_p
1851 		 to verify that the REG in the USE does not change between
1852 		 I3 and INSN.
1853 		 If the USE in INSN was for a pseudo register, the matching
1854 		 insn pattern will likely match any register; combining this
1855 		 with any other USE would only be safe if we knew that the
1856 		 used registers have identical values, or if there was
1857 		 something to tell them apart, e.g. different modes.  For
1858 		 now, we forgo such complicated tests and simply disallow
1859 		 combining of USES of pseudo registers with any other USE.  */
1860 	      if (REG_P (XEXP (elt, 0))
1861 		  && GET_CODE (PATTERN (i3)) == PARALLEL)
1862 		{
1863 		  rtx i3pat = PATTERN (i3);
1864 		  int i = XVECLEN (i3pat, 0) - 1;
1865 		  unsigned int regno = REGNO (XEXP (elt, 0));
1866 
1867 		  do
1868 		    {
1869 		      rtx i3elt = XVECEXP (i3pat, 0, i);
1870 
1871 		      if (GET_CODE (i3elt) == USE
1872 			  && REG_P (XEXP (i3elt, 0))
1873 			  && (REGNO (XEXP (i3elt, 0)) == regno
1874 			      ? reg_set_between_p (XEXP (elt, 0),
1875 						   PREV_INSN (insn), i3)
1876 			      : regno >= FIRST_PSEUDO_REGISTER))
1877 			return 0;
1878 		    }
1879 		  while (--i >= 0);
1880 		}
1881 	      break;
1882 
1883 	      /* We can ignore CLOBBERs.  */
1884 	    case CLOBBER:
1885 	      break;
1886 
1887 	    case SET:
1888 	      /* Ignore SETs whose result isn't used but not those that
1889 		 have side-effects.  */
1890 	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1891 		  && insn_nothrow_p (insn)
1892 		  && !side_effects_p (elt))
1893 		break;
1894 
1895 	      /* If we have already found a SET, this is a second one and
1896 		 so we cannot combine with this insn.  */
1897 	      if (set)
1898 		return 0;
1899 
1900 	      set = elt;
1901 	      break;
1902 
1903 	    default:
1904 	      /* Anything else means we can't combine.  */
1905 	      return 0;
1906 	    }
1907 	}
1908 
1909       if (set == 0
1910 	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1911 	     so don't do anything with it.  */
1912 	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1913 	return 0;
1914     }
1915   else
1916     return 0;
1917 
1918   if (set == 0)
1919     return 0;
1920 
1921   /* The simplification in expand_field_assignment may call back to
1922      get_last_value, so set safe guard here.  */
1923   subst_low_luid = DF_INSN_LUID (insn);
1924 
1925   set = expand_field_assignment (set);
1926   src = SET_SRC (set), dest = SET_DEST (set);
1927 
1928   /* Do not eliminate a user-specified register if it is used in an
1929      asm input, because we may break the register asm usage defined
1930      in the GCC manual if we allow doing so.
1931      Be aware that this may cover more cases than we expect, but this
1932      should be harmless.  */
1933   if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1934       && extract_asm_operands (PATTERN (i3)))
1935     return 0;
1936 
1937   /* Don't eliminate a store in the stack pointer.  */
1938   if (dest == stack_pointer_rtx
1939       /* Don't combine with an insn that sets a register to itself if it has
1940 	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
1941       || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1942       /* Can't merge an ASM_OPERANDS.  */
1943       || GET_CODE (src) == ASM_OPERANDS
1944       /* Can't merge a function call.  */
1945       || GET_CODE (src) == CALL
1946       /* Don't eliminate a function call argument.  */
1947       || (CALL_P (i3)
1948 	  && (find_reg_fusage (i3, USE, dest)
1949 	      || (REG_P (dest)
1950 		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
1951 		  && global_regs[REGNO (dest)])))
1952       /* Don't substitute into an incremented register.  */
1953       || FIND_REG_INC_NOTE (i3, dest)
1954       || (succ && FIND_REG_INC_NOTE (succ, dest))
1955       || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1956       /* Don't substitute into a non-local goto, this confuses CFG.  */
1957       || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1958       /* Make sure that DEST is not used after SUCC but before I3.  */
1959       || (!all_adjacent
1960 	  && ((succ2
1961 	       && (reg_used_between_p (dest, succ2, i3)
1962 		   || reg_used_between_p (dest, succ, succ2)))
1963 	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1964       /* Make sure that the value that is to be substituted for the register
1965 	 does not use any registers whose values alter in between.  However,
1966 	 if the insns are adjacent, a use can't cross a set even though we
1967 	 think it might (this can happen for a sequence of insns each setting
1968 	 the same destination; last_set of that register might point to
1969 	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
1970 	 equivalent to the memory so the substitution is valid even if there
1971 	 are intervening stores.  Also, don't move a volatile asm or
1972 	 UNSPEC_VOLATILE across any other insns.  */
1973       || (! all_adjacent
1974 	  && (((!MEM_P (src)
1975 		|| ! find_reg_note (insn, REG_EQUIV, src))
1976 	       && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1977 	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1978 	      || GET_CODE (src) == UNSPEC_VOLATILE))
1979       /* Don't combine across a CALL_INSN, because that would possibly
1980 	 change whether the life span of some REGs crosses calls or not,
1981 	 and it is a pain to update that information.
1982 	 Exception: if source is a constant, moving it later can't hurt.
1983 	 Accept that as a special case.  */
1984       || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1985     return 0;
1986 
1987   /* DEST must either be a REG or CC0.  */
1988   if (REG_P (dest))
1989     {
1990       /* If register alignment is being enforced for multi-word items in all
1991 	 cases except for parameters, it is possible to have a register copy
1992 	 insn referencing a hard register that is not allowed to contain the
1993 	 mode being copied and which would not be valid as an operand of most
1994 	 insns.  Eliminate this problem by not combining with such an insn.
1995 
1996 	 Also, on some machines we don't want to extend the life of a hard
1997 	 register.  */
1998 
1999       if (REG_P (src)
2000 	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2001 	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
2002 	      /* Don't extend the life of a hard register unless it is
2003 		 user variable (if we have few registers) or it can't
2004 		 fit into the desired register (meaning something special
2005 		 is going on).
2006 		 Also avoid substituting a return register into I3, because
2007 		 reload can't handle a conflict with constraints of other
2008 		 inputs.  */
2009 	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
2010 		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
2011 	return 0;
2012     }
2013   else if (GET_CODE (dest) != CC0)
2014     return 0;
2015 
2016 
2017   if (GET_CODE (PATTERN (i3)) == PARALLEL)
2018     for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2019       if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2020 	{
2021 	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2022 
2023 	  /* If the clobber represents an earlyclobber operand, we must not
2024 	     substitute an expression containing the clobbered register.
2025 	     As we do not analyze the constraint strings here, we have to
2026 	     make the conservative assumption.  However, if the register is
2027 	     a fixed hard reg, the clobber cannot represent any operand;
2028 	     we leave it up to the machine description to either accept or
2029 	     reject use-and-clobber patterns.  */
2030 	  if (!REG_P (reg)
2031 	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2032 	      || !fixed_regs[REGNO (reg)])
2033 	    if (reg_overlap_mentioned_p (reg, src))
2034 	      return 0;
2035 	}
2036 
2037   /* If INSN contains anything volatile, or is an `asm' (whether volatile
2038      or not), reject, unless nothing volatile comes between it and I3.  */
2039 
2040   if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2041     {
2042       /* Make sure neither succ nor succ2 contains a volatile reference.  */
2043       if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2044 	return 0;
2045       if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2046 	return 0;
2047       /* We'll check insns between INSN and I3 below.  */
2048     }
2049 
2050   /* If INSN is an asm, and DEST is a hard register, reject, since it has
2051      to be an explicit register variable, and was chosen for a reason.  */
2052 
2053   if (GET_CODE (src) == ASM_OPERANDS
2054       && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2055     return 0;
2056 
2057   /* If INSN contains volatile references (specifically volatile MEMs),
2058      we cannot combine across any other volatile references.
2059      Even if INSN doesn't contain volatile references, any intervening
2060      volatile insn might affect machine state.  */
2061 
2062   is_volatile_p = volatile_refs_p (PATTERN (insn))
2063     ? volatile_refs_p
2064     : volatile_insn_p;
2065 
2066   for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2067     if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2068       return 0;
2069 
2070   /* If INSN contains an autoincrement or autodecrement, make sure that
2071      register is not used between there and I3, and not already used in
2072      I3 either.  Neither must it be used in PRED or SUCC, if they exist.
2073      Also insist that I3 not be a jump; if it were one
2074      and the incremented register were spilled, we would lose.  */
2075 
2076   if (AUTO_INC_DEC)
2077     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2078       if (REG_NOTE_KIND (link) == REG_INC
2079 	  && (JUMP_P (i3)
2080 	      || reg_used_between_p (XEXP (link, 0), insn, i3)
2081 	      || (pred != NULL_RTX
2082 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2083 	      || (pred2 != NULL_RTX
2084 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2085 	      || (succ != NULL_RTX
2086 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2087 	      || (succ2 != NULL_RTX
2088 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2089 	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2090 	return 0;
2091 
2092   /* Don't combine an insn that follows a CC0-setting insn.
2093      An insn that uses CC0 must not be separated from the one that sets it.
2094      We do, however, allow I2 to follow a CC0-setting insn if that insn
2095      is passed as I1; in that case it will be deleted also.
2096      We also allow combining in this case if all the insns are adjacent
2097      because that would leave the two CC0 insns adjacent as well.
2098      It would be more logical to test whether CC0 occurs inside I1 or I2,
2099      but that would be much slower, and this ought to be equivalent.  */
2100 
2101   if (HAVE_cc0)
2102     {
2103       p = prev_nonnote_insn (insn);
2104       if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2105 	  && ! all_adjacent)
2106 	return 0;
2107     }
2108 
2109   /* If we get here, we have passed all the tests and the combination is
2110      to be allowed.  */
2111 
2112   *pdest = dest;
2113   *psrc = src;
2114 
2115   return 1;
2116 }
2117 
2118 /* LOC is the location within I3 that contains its pattern or the component
2119    of a PARALLEL of the pattern.  We validate that it is valid for combining.
2120 
2121    One problem is if I3 modifies its output, as opposed to replacing it
2122    entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2123    doing so would produce an insn that is not equivalent to the original insns.
2124 
2125    Consider:
2126 
2127 	 (set (reg:DI 101) (reg:DI 100))
2128 	 (set (subreg:SI (reg:DI 101) 0) <foo>)
2129 
2130    This is NOT equivalent to:
2131 
2132 	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2133 		    (set (reg:DI 101) (reg:DI 100))])
2134 
2135    Not only does this modify 100 (in which case it might still be valid
2136    if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2137 
2138    We can also run into a problem if I2 sets a register that I1
2139    uses and I1 gets directly substituted into I3 (not via I2).  In that
2140    case, we would be getting the wrong value of I2DEST into I3, so we
2141    must reject the combination.  This case occurs when I2 and I1 both
2142    feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2143    If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2144    of a SET must prevent combination from occurring.  The same situation
2145    can occur for I0, in which case I0_NOT_IN_SRC is set.
2146 
2147    Before doing the above check, we first try to expand a field assignment
2148    into a set of logical operations.
2149 
2150    If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2151    we place a register that is both set and used within I3.  If more than one
2152    such register is detected, we fail.
2153 
2154    Return 1 if the combination is valid, zero otherwise.  */
2155 
2156 static int
2157 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2158 		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2159 {
2160   rtx x = *loc;
2161 
2162   if (GET_CODE (x) == SET)
2163     {
2164       rtx set = x;
2165       rtx dest = SET_DEST (set);
2166       rtx src = SET_SRC (set);
2167       rtx inner_dest = dest;
2168       rtx subdest;
2169 
2170       while (GET_CODE (inner_dest) == STRICT_LOW_PART
2171 	     || GET_CODE (inner_dest) == SUBREG
2172 	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
2173 	inner_dest = XEXP (inner_dest, 0);
2174 
2175       /* Check for the case where I3 modifies its output, as discussed
2176 	 above.  We don't want to prevent pseudos from being combined
2177 	 into the address of a MEM, so only prevent the combination if
2178 	 i1 or i2 set the same MEM.  */
2179       if ((inner_dest != dest &&
2180 	   (!MEM_P (inner_dest)
2181 	    || rtx_equal_p (i2dest, inner_dest)
2182 	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
2183 	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2184 	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
2185 	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2186 	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2187 
2188 	  /* This is the same test done in can_combine_p except we can't test
2189 	     all_adjacent; we don't have to, since this instruction will stay
2190 	     in place, thus we are not considering increasing the lifetime of
2191 	     INNER_DEST.
2192 
2193 	     Also, if this insn sets a function argument, combining it with
2194 	     something that might need a spill could clobber a previous
2195 	     function argument; the all_adjacent test in can_combine_p also
2196 	     checks this; here, we do a more specific test for this case.  */
2197 
2198 	  || (REG_P (inner_dest)
2199 	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2200 	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2201 					GET_MODE (inner_dest))))
2202 	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2203 	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2204 	return 0;
2205 
2206       /* If DEST is used in I3, it is being killed in this insn, so
2207 	 record that for later.  We have to consider paradoxical
2208 	 subregs here, since they kill the whole register, but we
2209 	 ignore partial subregs, STRICT_LOW_PART, etc.
2210 	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2211 	 STACK_POINTER_REGNUM, since these are always considered to be
2212 	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
2213       subdest = dest;
2214       if (GET_CODE (subdest) == SUBREG
2215 	  && (GET_MODE_SIZE (GET_MODE (subdest))
2216 	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2217 	subdest = SUBREG_REG (subdest);
2218       if (pi3dest_killed
2219 	  && REG_P (subdest)
2220 	  && reg_referenced_p (subdest, PATTERN (i3))
2221 	  && REGNO (subdest) != FRAME_POINTER_REGNUM
2222 	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2223 	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2224 	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2225 	      || (REGNO (subdest) != ARG_POINTER_REGNUM
2226 		  || ! fixed_regs [REGNO (subdest)]))
2227 	  && REGNO (subdest) != STACK_POINTER_REGNUM)
2228 	{
2229 	  if (*pi3dest_killed)
2230 	    return 0;
2231 
2232 	  *pi3dest_killed = subdest;
2233 	}
2234     }
2235 
2236   else if (GET_CODE (x) == PARALLEL)
2237     {
2238       int i;
2239 
2240       for (i = 0; i < XVECLEN (x, 0); i++)
2241 	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2242 				i1_not_in_src, i0_not_in_src, pi3dest_killed))
2243 	  return 0;
2244     }
2245 
2246   return 1;
2247 }
2248 
2249 /* Return 1 if X is an arithmetic expression that contains a multiplication
2250    or division.  We don't count multiplications by powers of two here.  */
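/* For example (illustrative), (plus:SI (mult:SI (reg:SI 100) (const_int 5))
   (reg:SI 101)) yields 1 because it contains a multiplication by a value
   that is not a power of two, while (mult:SI (reg:SI 100) (const_int 8))
   yields 0 because 8 is a power of two.  */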
2251 
2252 static int
2253 contains_muldiv (rtx x)
2254 {
2255   switch (GET_CODE (x))
2256     {
2257     case MOD:  case DIV:  case UMOD:  case UDIV:
2258       return 1;
2259 
2260     case MULT:
2261       return ! (CONST_INT_P (XEXP (x, 1))
2262 		&& exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2263     default:
2264       if (BINARY_P (x))
2265 	return contains_muldiv (XEXP (x, 0))
2266 	    || contains_muldiv (XEXP (x, 1));
2267 
2268       if (UNARY_P (x))
2269 	return contains_muldiv (XEXP (x, 0));
2270 
2271       return 0;
2272     }
2273 }
2274 
2275 /* Determine whether INSN can be used in a combination.  Return nonzero if
2276    not.  This is used in try_combine to detect early some cases where we
2277    can't perform combinations.  */
2278 
2279 static int
2280 cant_combine_insn_p (rtx_insn *insn)
2281 {
2282   rtx set;
2283   rtx src, dest;
2284 
2285   /* If this isn't really an insn, we can't do anything.
2286      This can occur when flow deletes an insn that it has merged into an
2287      auto-increment address.  */
2288   if (!NONDEBUG_INSN_P (insn))
2289     return 1;
2290 
2291   /* Never combine loads and stores involving hard regs that are likely
2292      to be spilled.  The register allocator can usually handle such
2293      reg-reg moves by tying.  If we allow the combiner to make
2294      substitutions of likely-spilled regs, reload might die.
2295      As an exception, we allow combinations involving fixed regs; these are
2296      not available to the register allocator so there's no risk involved.  */
2297 
2298   set = single_set (insn);
2299   if (! set)
2300     return 0;
2301   src = SET_SRC (set);
2302   dest = SET_DEST (set);
2303   if (GET_CODE (src) == SUBREG)
2304     src = SUBREG_REG (src);
2305   if (GET_CODE (dest) == SUBREG)
2306     dest = SUBREG_REG (dest);
2307   if (REG_P (src) && REG_P (dest)
2308       && ((HARD_REGISTER_P (src)
2309 	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2310 	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2311 	  || (HARD_REGISTER_P (dest)
2312 	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2313 	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2314     return 1;
2315 
2316   return 0;
2317 }
2318 
2319 struct likely_spilled_retval_info
2320 {
2321   unsigned regno, nregs;
2322   unsigned mask;
2323 };
2324 
2325 /* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
2326    hard registers that are known to be written to / clobbered in full.  */
2327 static void
2328 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2329 {
2330   struct likely_spilled_retval_info *const info =
2331     (struct likely_spilled_retval_info *) data;
2332   unsigned regno, nregs;
2333   unsigned new_mask;
2334 
2335   if (!REG_P (XEXP (set, 0)))
2336     return;
2337   regno = REGNO (x);
2338   if (regno >= info->regno + info->nregs)
2339     return;
2340   nregs = REG_NREGS (x);
2341   if (regno + nregs <= info->regno)
2342     return;
2343   new_mask = (2U << (nregs - 1)) - 1;
2344   if (regno < info->regno)
2345     new_mask >>= info->regno - regno;
2346   else
2347     new_mask <<= regno - info->regno;
2348   info->mask &= ~new_mask;
2349 }
2350 
2351 /* Return nonzero iff part of the return value is live during INSN, and
2352    it is likely spilled.  This can happen when more than one insn is needed
2353    to copy the return value, e.g. when we consider combining into the
2354    second copy insn for a complex value.  */
2355 
2356 static int
2357 likely_spilled_retval_p (rtx_insn *insn)
2358 {
2359   rtx_insn *use = BB_END (this_basic_block);
2360   rtx reg;
2361   rtx_insn *p;
2362   unsigned regno, nregs;
2363   /* We assume here that no machine mode needs more than
2364      32 hard registers when the value overlaps with a register
2365      for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
2366   unsigned mask;
2367   struct likely_spilled_retval_info info;
2368 
2369   if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2370     return 0;
2371   reg = XEXP (PATTERN (use), 0);
2372   if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2373     return 0;
2374   regno = REGNO (reg);
2375   nregs = REG_NREGS (reg);
2376   if (nregs == 1)
2377     return 0;
2378   mask = (2U << (nregs - 1)) - 1;
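  /* E.g. for nregs == 3 this sets mask to 7 (binary 111), one bit per hard
     register of the return value; the (2U << (nregs - 1)) - 1 form is
     presumably used instead of (1U << nregs) - 1 so that nregs == 32 does
     not shift by the full width of the type.  */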
2379 
2380   /* Disregard parts of the return value that are set later.  */
2381   info.regno = regno;
2382   info.nregs = nregs;
2383   info.mask = mask;
2384   for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2385     if (INSN_P (p))
2386       note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2387   mask = info.mask;
2388 
2389   /* Check if any of the (probably) live return value registers is
2390      likely spilled.  */
2391   nregs--;
2392   do
2393     {
2394       if ((mask & 1 << nregs)
2395 	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2396 	return 1;
2397     } while (nregs--);
2398   return 0;
2399 }
2400 
2401 /* Adjust INSN after we made a change to its destination.
2402 
2403    Changing the destination can invalidate notes that say something about
2404    the results of the insn and a LOG_LINK pointing to the insn.  */
2405 
2406 static void
2407 adjust_for_new_dest (rtx_insn *insn)
2408 {
2409   /* For notes, be conservative and simply remove them.  */
2410   remove_reg_equal_equiv_notes (insn);
2411 
2412   /* The new insn will have a destination that was previously the destination
2413      of an insn just above it.  Call distribute_links to make a LOG_LINK from
2414      the next use of that destination.  */
2415 
2416   rtx set = single_set (insn);
2417   gcc_assert (set);
2418 
2419   rtx reg = SET_DEST (set);
2420 
2421   while (GET_CODE (reg) == ZERO_EXTRACT
2422 	 || GET_CODE (reg) == STRICT_LOW_PART
2423 	 || GET_CODE (reg) == SUBREG)
2424     reg = XEXP (reg, 0);
2425   gcc_assert (REG_P (reg));
2426 
2427   distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2428 
2429   df_insn_rescan (insn);
2430 }
2431 
2432 /* Return TRUE if combine can reuse reg X in mode MODE.
2433    ADDED_SETS is nonzero if the original set is still required.  */
2434 static bool
2435 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2436 {
2437   unsigned int regno;
2438 
2439   if (!REG_P (x))
2440     return false;
2441 
2442   regno = REGNO (x);
2443   /* Allow hard registers if the new mode is legal, and occupies no more
2444      registers than the old mode.  */
2445   if (regno < FIRST_PSEUDO_REGISTER)
2446     return (HARD_REGNO_MODE_OK (regno, mode)
2447 	    && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);
2448 
2449   /* Or a pseudo that is only used once.  */
2450   return (regno < reg_n_sets_max
2451 	  && REG_N_SETS (regno) == 1
2452 	  && !added_sets
2453 	  && !REG_USERVAR_P (x));
2454 }
2455 
2456 
2457 /* Check whether X, the destination of a set, refers to part of
2458    the register specified by REG.  */
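/* For example (illustrative), with REG == (reg:SI 100),
   (strict_low_part (subreg:HI (reg:SI 100) 0)) refers to part of the
   register and returns true, while a plain (reg:SI 100) destination
   returns false.  */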
2459 
2460 static bool
2461 reg_subword_p (rtx x, rtx reg)
2462 {
2463   /* Check that reg is an integer mode register.  */
2464   if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2465     return false;
2466 
2467   if (GET_CODE (x) == STRICT_LOW_PART
2468       || GET_CODE (x) == ZERO_EXTRACT)
2469     x = XEXP (x, 0);
2470 
2471   return GET_CODE (x) == SUBREG
2472 	 && SUBREG_REG (x) == reg
2473 	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2474 }
2475 
2476 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2477    Note that the INSN should be deleted *after* removing dead edges, so
2478    that the kept edge is the fallthrough edge for a (set (pc) (pc))
2479    but not for a (set (pc) (label_ref FOO)).  */
2480 
2481 static void
2482 update_cfg_for_uncondjump (rtx_insn *insn)
2483 {
2484   basic_block bb = BLOCK_FOR_INSN (insn);
2485   gcc_assert (BB_END (bb) == insn);
2486 
2487   purge_dead_edges (bb);
2488 
2489   delete_insn (insn);
2490   if (EDGE_COUNT (bb->succs) == 1)
2491     {
2492       rtx_insn *insn;
2493 
2494       single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2495 
2496       /* Remove barriers from the footer if there are any.  */
2497       for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2498 	if (BARRIER_P (insn))
2499 	  {
2500 	    if (PREV_INSN (insn))
2501 	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2502 	    else
2503 	      BB_FOOTER (bb) = NEXT_INSN (insn);
2504 	    if (NEXT_INSN (insn))
2505 	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2506 	  }
2507 	else if (LABEL_P (insn))
2508 	  break;
2509     }
2510 }
2511 
2512 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2513    by an arbitrary number of CLOBBERs.  */
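/* For N == 2, a pattern like
     (parallel [(set (reg:CC 100) (compare:CC (reg:SI 101) (const_int 0)))
		(set (reg:SI 102) (plus:SI (reg:SI 101) (const_int -1)))
		(clobber (scratch:SI))])
   qualifies (illustrative; trailing CLOBBERs are accepted as long as they
   do not clobber (const_int 0)).  */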
2514 static bool
2515 is_parallel_of_n_reg_sets (rtx pat, int n)
2516 {
2517   if (GET_CODE (pat) != PARALLEL)
2518     return false;
2519 
2520   int len = XVECLEN (pat, 0);
2521   if (len < n)
2522     return false;
2523 
2524   int i;
2525   for (i = 0; i < n; i++)
2526     if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2527 	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2528       return false;
2529   for ( ; i < len; i++)
2530     if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2531 	|| XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2532       return false;
2533 
2534   return true;
2535 }
2536 
2537 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2538    CLOBBERs), can be split into individual SETs in that order, without
2539    changing semantics.  */
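/* For example (illustrative),
     (parallel [(set (reg:SI 100) (reg:SI 101))
		(set (reg:SI 101) (reg:SI 100))])
   is a register swap and cannot be split: executed sequentially, the second
   SET would read the value just written by the first instead of the
   original one.  */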
2540 static bool
2541 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2542 {
2543   if (!insn_nothrow_p (insn))
2544     return false;
2545 
2546   rtx pat = PATTERN (insn);
2547 
2548   int i, j;
2549   for (i = 0; i < n; i++)
2550     {
2551       if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2552 	return false;
2553 
2554       rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2555 
2556       for (j = i + 1; j < n; j++)
2557 	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2558 	  return false;
2559     }
2560 
2561   return true;
2562 }
2563 
2564 /* Try to combine the insns I0, I1 and I2 into I3.
2565    Here I0, I1 and I2 appear earlier than I3.
2566    I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2567    I3.
2568 
2569    If we are combining more than two insns and the resulting insn is not
2570    recognized, try splitting it into two insns.  If that happens, I2 and I3
2571    are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2572    Otherwise, I0, I1 and I2 are pseudo-deleted.
2573 
2574    Return 0 if the combination does not work.  Then nothing is changed.
2575    If we did the combination, return the insn at which combine should
2576    resume scanning.
2577 
2578    Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2579    new direct jump instruction.
2580 
2581    LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2582    been I3 passed to an earlier try_combine within the same basic
2583    block.  */
2584 
2585 static rtx_insn *
2586 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2587 	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
2588 {
2589   /* New patterns for I3 and I2, respectively.  */
2590   rtx newpat, newi2pat = 0;
2591   rtvec newpat_vec_with_clobbers = 0;
2592   int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2593   /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2594      dead.  */
2595   int added_sets_0, added_sets_1, added_sets_2;
2596   /* Total number of SETs to put into I3.  */
2597   int total_sets;
2598   /* Nonzero if I2's or I1's body now appears in I3.  */
2599   int i2_is_used = 0, i1_is_used = 0;
2600   /* INSN_CODEs for new I3, new I2, and user of condition code.  */
2601   int insn_code_number, i2_code_number = 0, other_code_number = 0;
2602   /* Contains I3 if the destination of I3 is used in its source, which means
2603      that the old life of I3 is being killed.  If that usage is placed into
2604      I2 and not in I3, a REG_DEAD note must be made.  */
2605   rtx i3dest_killed = 0;
2606   /* SET_DEST and SET_SRC of I2, I1 and I0.  */
2607   rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2608   /* Copy of SET_SRC of I1 and I0, if needed.  */
2609   rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2610   /* Set if I2DEST was reused as a scratch register.  */
2611   bool i2scratch = false;
2612   /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
2613   rtx i0pat = 0, i1pat = 0, i2pat = 0;
2614   /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
2615   int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2616   int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2617   int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2618   int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2619   /* Notes that must be added to REG_NOTES in I3 and I2.  */
2620   rtx new_i3_notes, new_i2_notes;
2621   /* Notes that we substituted I3 into I2 instead of the normal case.  */
2622   int i3_subst_into_i2 = 0;
2623   /* Notes that I1, I2 or I3 is a MULT operation.  */
2624   int have_mult = 0;
2625   int swap_i2i3 = 0;
2626   int changed_i3_dest = 0;
2627 
2628   int maxreg;
2629   rtx_insn *temp_insn;
2630   rtx temp_expr;
2631   struct insn_link *link;
2632   rtx other_pat = 0;
2633   rtx new_other_notes;
2634   int i;
2635 
2636   /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2637      never be).  */
2638   if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2639     return 0;
2640 
2641   /* Only try four-insn combinations when there's high likelihood of
2642      success.  Look for simple insns, such as loads of constants or
2643      binary operations involving a constant.  */
2644   if (i0)
2645     {
2646       int i;
2647       int ngood = 0;
2648       int nshift = 0;
2649       rtx set0, set3;
2650 
2651       if (!flag_expensive_optimizations)
2652 	return 0;
2653 
2654       for (i = 0; i < 4; i++)
2655 	{
2656 	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2657 	  rtx set = single_set (insn);
2658 	  rtx src;
2659 	  if (!set)
2660 	    continue;
2661 	  src = SET_SRC (set);
2662 	  if (CONSTANT_P (src))
2663 	    {
2664 	      ngood += 2;
2665 	      break;
2666 	    }
2667 	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2668 	    ngood++;
2669 	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2670 		   || GET_CODE (src) == LSHIFTRT)
2671 	    nshift++;
2672 	}
2673 
2674       /* If I0 loads from memory and I3 sets the same memory, then I1 and I2
2675 	 are likely manipulating its value.  Ideally we'll be able to combine
2676 	 all four insns into a bitfield insertion of some kind.
2677 
2678 	 Note the source in I0 might be inside a sign/zero extension and the
2679 	 memory modes in I0 and I3 might be different.  So extract the address
2680 	 from the destination of I3 and search for it in the source of I0.
2681 
2682 	 In the event that there's a match but the source/dest do not actually
2683 	 refer to the same memory, the worst that happens is we try some
2684 	 combinations that we wouldn't have otherwise.  */
2685       if ((set0 = single_set (i0))
2686 	  /* Ensure the source of SET0 is a MEM, possibly buried inside
2687 	     an extension.  */
2688 	  && (GET_CODE (SET_SRC (set0)) == MEM
2689 	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2690 		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2691 		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2692 	  && (set3 = single_set (i3))
2693 	  /* Ensure the destination of SET3 is a MEM.  */
2694 	  && GET_CODE (SET_DEST (set3)) == MEM
2695 	  /* Would it be better to extract the base address for the MEM
2696 	     in SET3 and look for that?  I don't have cases where it matters
2697 	     but I could envision such cases.  */
2698 	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2699 	ngood += 2;
2700 
2701       if (ngood < 2 && nshift < 2)
2702 	return 0;
2703     }
2704 
2705   /* Exit early if one of the insns involved can't be used for
2706      combinations.  */
2707   if (CALL_P (i2)
2708       || (i1 && CALL_P (i1))
2709       || (i0 && CALL_P (i0))
2710       || cant_combine_insn_p (i3)
2711       || cant_combine_insn_p (i2)
2712       || (i1 && cant_combine_insn_p (i1))
2713       || (i0 && cant_combine_insn_p (i0))
2714       || likely_spilled_retval_p (i3))
2715     return 0;
2716 
2717   combine_attempts++;
2718   undobuf.other_insn = 0;
2719 
2720   /* Reset the hard register usage information.  */
2721   CLEAR_HARD_REG_SET (newpat_used_regs);
2722 
2723   if (dump_file && (dump_flags & TDF_DETAILS))
2724     {
2725       if (i0)
2726 	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2727 		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2728       else if (i1)
2729 	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2730 		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2731       else
2732 	fprintf (dump_file, "\nTrying %d -> %d:\n",
2733 		 INSN_UID (i2), INSN_UID (i3));
2734     }
2735 
2736   /* If multiple insns feed into one of I2 or I3, they can be in any
2737      order.  To simplify the code below, reorder them in sequence.  */
2738   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2739     std::swap (i0, i2);
2740   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2741     std::swap (i0, i1);
2742   if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2743     std::swap (i1, i2);
2744 
2745   added_links_insn = 0;
2746 
2747   /* First check for one important special case that the code below will
2748      not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
2749      and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
2750      we may be able to replace that destination with the destination of I3.
2751      This occurs in the common code where we compute both a quotient and
2752      remainder into a structure, in which case we want to do the computation
2753      directly into the structure to avoid register-register copies.
2754 
2755      Note that this case handles both multiple sets in I2 and also cases
2756      where I2 has a number of CLOBBERs inside the PARALLEL.
2757 
2758      We make very conservative checks below and only try to handle the
2759      most common cases of this.  For example, we only handle the case
2760      where I2 and I3 are adjacent to avoid making difficult register
2761      usage tests.  */
2762 
2763   if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2764       && REG_P (SET_SRC (PATTERN (i3)))
2765       && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2766       && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2767       && GET_CODE (PATTERN (i2)) == PARALLEL
2768       && ! side_effects_p (SET_DEST (PATTERN (i3)))
2769       /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2770 	 below would need to check what is inside (and reg_overlap_mentioned_p
2771 	 doesn't support those codes anyway).  Don't allow those destinations;
2772 	 the resulting insn isn't likely to be recognized anyway.  */
2773       && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2774       && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2775       && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2776 				    SET_DEST (PATTERN (i3)))
2777       && next_active_insn (i2) == i3)
2778     {
2779       rtx p2 = PATTERN (i2);
2780 
2781       /* Make sure that the destination of I3,
2782 	 which we are going to substitute into one output of I2,
2783 	 is not used within another output of I2.  We must avoid making this:
2784 	 (parallel [(set (mem (reg 69)) ...)
2785 		    (set (reg 69) ...)])
2786 	 which is not well-defined as to order of actions.
2787 	 (Besides, reload can't handle output reloads for this.)
2788 
2789 	 The problem can also happen if the dest of I3 is a memory ref,
2790 	 if another dest in I2 is an indirect memory ref.  */
2791       for (i = 0; i < XVECLEN (p2, 0); i++)
2792 	if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2793 	     || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2794 	    && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2795 					SET_DEST (XVECEXP (p2, 0, i))))
2796 	  break;
2797 
2798       /* Make sure this PARALLEL is not an asm.  We do not allow combining
2799 	 that usually (see can_combine_p), so do not here either.  */
2800       for (i = 0; i < XVECLEN (p2, 0); i++)
2801 	if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2802 	    && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2803 	  break;
2804 
2805       if (i == XVECLEN (p2, 0))
2806 	for (i = 0; i < XVECLEN (p2, 0); i++)
2807 	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2808 	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2809 	    {
2810 	      combine_merges++;
2811 
2812 	      subst_insn = i3;
2813 	      subst_low_luid = DF_INSN_LUID (i2);
2814 
2815 	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
2816 	      i2src = SET_SRC (XVECEXP (p2, 0, i));
2817 	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
2818 	      i2dest_killed = dead_or_set_p (i2, i2dest);
2819 
2820 	      /* Replace the dest in I2 with our dest and make the resulting
2821 		 insn the new pattern for I3.  Then skip to where we validate
2822 		 the pattern.  Everything was set up above.  */
2823 	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2824 	      newpat = p2;
2825 	      i3_subst_into_i2 = 1;
2826 	      goto validate_replacement;
2827 	    }
2828     }
2829 
2830   /* If I2 is setting a pseudo to a constant and I3 is setting some
2831      sub-part of it to another constant, merge them by making a new
2832      constant.  */
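  /* For example (illustrative, assuming a little-endian target):
	I2: (set (reg:SI 100) (const_int 65280))   [0xff00]
	I3: (set (strict_low_part (subreg:QI (reg:SI 100) 0)) (const_int 171))
     merge, if the new pattern is recognized, into a single
	(set (reg:SI 100) (const_int 65451))       [0xffab]
     built with wi::insert below.  */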
2833   if (i1 == 0
2834       && (temp_expr = single_set (i2)) != 0
2835       && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2836       && GET_CODE (PATTERN (i3)) == SET
2837       && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2838       && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2839     {
2840       rtx dest = SET_DEST (PATTERN (i3));
2841       int offset = -1;
2842       int width = 0;
2843 
2844       if (GET_CODE (dest) == ZERO_EXTRACT)
2845 	{
2846 	  if (CONST_INT_P (XEXP (dest, 1))
2847 	      && CONST_INT_P (XEXP (dest, 2)))
2848 	    {
2849 	      width = INTVAL (XEXP (dest, 1));
2850 	      offset = INTVAL (XEXP (dest, 2));
2851 	      dest = XEXP (dest, 0);
2852 	      if (BITS_BIG_ENDIAN)
2853 		offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
2854 	    }
2855 	}
2856       else
2857 	{
2858 	  if (GET_CODE (dest) == STRICT_LOW_PART)
2859 	    dest = XEXP (dest, 0);
2860 	  width = GET_MODE_PRECISION (GET_MODE (dest));
2861 	  offset = 0;
2862 	}
2863 
2864       if (offset >= 0)
2865 	{
2866 	  /* If this is the low part, we're done.  */
2867 	  if (subreg_lowpart_p (dest))
2868 	    ;
2869 	  /* Handle the case where inner is twice the size of outer.  */
2870 	  else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
2871 		   == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2872 	    offset += GET_MODE_PRECISION (GET_MODE (dest));
2873 	  /* Otherwise give up for now.  */
2874 	  else
2875 	    offset = -1;
2876 	}
2877 
2878       if (offset >= 0)
2879 	{
2880 	  rtx inner = SET_SRC (PATTERN (i3));
2881 	  rtx outer = SET_SRC (temp_expr);
2882 
2883 	  wide_int o
2884 	    = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp_expr))),
2885 			  std::make_pair (inner, GET_MODE (dest)),
2886 			  offset, width);
2887 
2888 	  combine_merges++;
2889 	  subst_insn = i3;
2890 	  subst_low_luid = DF_INSN_LUID (i2);
2891 	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
2892 	  i2dest = SET_DEST (temp_expr);
2893 	  i2dest_killed = dead_or_set_p (i2, i2dest);
2894 
2895 	  /* Replace the source in I2 with the new constant and make the
2896 	     resulting insn the new pattern for I3.  Then skip to where we
2897 	     validate the pattern.  Everything was set up above.  */
2898 	  SUBST (SET_SRC (temp_expr),
2899 		 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));
2900 
2901 	  newpat = PATTERN (i2);
2902 
2903           /* The dest of I3 has been replaced with the dest of I2.  */
2904           changed_i3_dest = 1;
2905 	  goto validate_replacement;
2906 	}
2907     }
2908 
2909   /* If we have no I1 and I2 looks like:
2910 	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2911 		   (set Y OP)])
2912      make up a dummy I1 that is
2913 	(set Y OP)
2914      and change I2 to be
2915 	(set (reg:CC X) (compare:CC Y (const_int 0)))
2916 
2917      (We can ignore any trailing CLOBBERs.)
2918 
2919      This undoes a previous combination and allows us to match a branch-and-
2920      decrement insn.  */
2921 
2922   if (!HAVE_cc0 && i1 == 0
2923       && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2924       && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2925 	  == MODE_CC)
2926       && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2927       && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2928       && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2929 		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2930       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2931       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2932     {
2933       /* We make I1 with the same INSN_UID as I2.  This gives it
2934 	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
2935 	 never appear in the insn stream so giving it the same INSN_UID
2936 	 as I2 will not cause a problem.  */
2937 
2938       i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2939 			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2940 			 -1, NULL_RTX);
2941       INSN_UID (i1) = INSN_UID (i2);
2942 
2943       SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2944       SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2945 	     SET_DEST (PATTERN (i1)));
2946       unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2947       SUBST_LINK (LOG_LINKS (i2),
2948 		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2949     }
2950 
2951   /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2952      make those two SETs separate I1 and I2 insns, and make an I0 that is
2953      the original I1.  */
2954   if (!HAVE_cc0 && i0 == 0
2955       && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2956       && can_split_parallel_of_n_reg_sets (i2, 2)
2957       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2958       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
2959       && !find_reg_note (i2, REG_UNUSED, 0))
2960     {
2961       /* If there is no I1, there is no I0 either.  */
2962       i0 = i1;
2963 
2964       /* We make I1 with the same INSN_UID as I2.  This gives it
2965 	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
2966 	 never appear in the insn stream so giving it the same INSN_UID
2967 	 as I2 will not cause a problem.  */
2968 
2969       i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2970 			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2971 			 -1, NULL_RTX);
2972       INSN_UID (i1) = INSN_UID (i2);
2973 
2974       SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2975     }
2976 
2977   /* Verify that I2 and I1 are valid for combining.  */
2978   if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2979       || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
2980 				 &i1dest, &i1src))
2981       || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
2982 				 &i0dest, &i0src)))
2983     {
2984       undo_all ();
2985       return 0;
2986     }
2987 
2988   /* Record whether I2DEST is used in I2SRC and similarly for the other
2989      cases.  Knowing this will help in register status updating below.  */
2990   i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2991   i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2992   i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2993   i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2994   i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2995   i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2996   i2dest_killed = dead_or_set_p (i2, i2dest);
2997   i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2998   i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2999 
3000   /* For the earlier insns, determine which of the subsequent ones they
3001      feed.  */
3002   i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3003   i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3004   i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3005 			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
3006 			     && reg_overlap_mentioned_p (i0dest, i2src))));
3007 
3008   /* Ensure that I3's pattern can be the destination of combines.  */
3009   if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3010 			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3011 			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3012 				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3013 			  &i3dest_killed))
3014     {
3015       undo_all ();
3016       return 0;
3017     }
3018 
3019   /* See if any of the insns is a MULT operation.  Unless one is, we will
3020      reject a combination whose result is a MULT, since it must be slower.
3021      Be conservative here.  */
3022   if (GET_CODE (i2src) == MULT
3023       || (i1 != 0 && GET_CODE (i1src) == MULT)
3024       || (i0 != 0 && GET_CODE (i0src) == MULT)
3025       || (GET_CODE (PATTERN (i3)) == SET
3026 	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3027     have_mult = 1;
3028 
3029   /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3030      We used to do this EXCEPT in one case: I3 has a post-inc in an
3031      output operand.  However, that exception can give rise to insns like
3032 	mov r3,(r3)+
3033      which is a famous insn on the PDP-11 where the value of r3 used as the
3034      source was model-dependent.  Avoid this sort of thing.  */
3035 
3036 #if 0
3037   if (!(GET_CODE (PATTERN (i3)) == SET
3038 	&& REG_P (SET_SRC (PATTERN (i3)))
3039 	&& MEM_P (SET_DEST (PATTERN (i3)))
3040 	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3041 	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3042     /* It's not the exception.  */
3043 #endif
3044     if (AUTO_INC_DEC)
3045       {
3046 	rtx link;
3047 	for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3048 	  if (REG_NOTE_KIND (link) == REG_INC
3049 	      && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3050 		  || (i1 != 0
3051 		      && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3052 	    {
3053 	      undo_all ();
3054 	      return 0;
3055 	    }
3056       }
3057 
3058   /* See if the SETs in I1 or I2 need to be kept around in the merged
3059      instruction: whenever the value set there is still needed past I3.
3060      For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3061 
3062      For the SET in I1, we have two cases: if I1 and I2 independently feed
3063      into I3, the set in I1 needs to be kept around unless I1DEST dies
3064      or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
3065      in I1 needs to be kept around unless I1DEST dies or is set in either
3066      I2 or I3.  The same considerations apply to I0.  */
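  /* Concretely (a made-up case): if I2 is (set (reg:SI 200) <x>) and
     (reg:SI 200) neither dies nor is set in I3, ADDED_SETS_2 will be nonzero
     and the SET from I2 is re-emitted in the PARALLEL built further below.  */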
3067 
3068   added_sets_2 = !dead_or_set_p (i3, i2dest);
3069 
3070   if (i1)
3071     added_sets_1 = !(dead_or_set_p (i3, i1dest)
3072 		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3073   else
3074     added_sets_1 = 0;
3075 
3076   if (i0)
3077     added_sets_0 =  !(dead_or_set_p (i3, i0dest)
3078 		      || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3079 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3080 			  && dead_or_set_p (i2, i0dest)));
3081   else
3082     added_sets_0 = 0;
3083 
3084   /* We are about to copy insns for the case where they need to be kept
3085      around.  Check that they can be copied in the merged instruction.  */
3086 
3087   if (targetm.cannot_copy_insn_p
3088       && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3089 	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3090 	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3091     {
3092       undo_all ();
3093       return 0;
3094     }
3095 
3096   /* If the set in I2 needs to be kept around, we must make a copy of
3097      PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3098      PATTERN (I2), we are only substituting for the original I1DEST, not into
3099      an already-substituted copy.  This also prevents making self-referential
3100      rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3101      I2DEST.  */
3102 
3103   if (added_sets_2)
3104     {
3105       if (GET_CODE (PATTERN (i2)) == PARALLEL)
3106 	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3107       else
3108 	i2pat = copy_rtx (PATTERN (i2));
3109     }
3110 
3111   if (added_sets_1)
3112     {
3113       if (GET_CODE (PATTERN (i1)) == PARALLEL)
3114 	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3115       else
3116 	i1pat = copy_rtx (PATTERN (i1));
3117     }
3118 
3119   if (added_sets_0)
3120     {
3121       if (GET_CODE (PATTERN (i0)) == PARALLEL)
3122 	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3123       else
3124 	i0pat = copy_rtx (PATTERN (i0));
3125     }
3126 
3127   combine_merges++;
3128 
3129   /* Substitute in the latest insn for the regs set by the earlier ones.  */
3130 
3131   maxreg = max_reg_num ();
3132 
3133   subst_insn = i3;
3134 
3135   /* Many machines that don't use CC0 have insns that can both perform an
3136      arithmetic operation and set the condition code.  These operations will
3137      be represented as a PARALLEL with the first element of the vector
3138      being a COMPARE of an arithmetic operation with the constant zero.
3139      The second element of the vector will set some pseudo to the result
3140      of the same arithmetic operation.  If we simplify the COMPARE, we won't
3141      match such a pattern and so will generate an extra insn.   Here we test
3142      for this case, where both the comparison and the operation result are
3143      needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3144      I2SRC.  Later we will make the PARALLEL that contains I2.  */
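  /* Schematically (with invented register numbers), I2 might be
	(set (reg:SI 200) (plus:SI (reg:SI 201) (reg:SI 202)))
     and I3
	(set (reg:CC 17) (compare:CC (reg:SI 200) (const_int 0)))
     in which case the COMPARE below is rebuilt as a comparison of the PLUS
     itself against zero, and the SET from I2 is added back when the final
     PARALLEL is made.  */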
3145 
3146   if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3147       && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3148       && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3149       && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3150     {
3151       rtx newpat_dest;
3152       rtx *cc_use_loc = NULL;
3153       rtx_insn *cc_use_insn = NULL;
3154       rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3155       machine_mode compare_mode, orig_compare_mode;
3156       enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3157 
3158       newpat = PATTERN (i3);
3159       newpat_dest = SET_DEST (newpat);
3160       compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3161 
3162       if (undobuf.other_insn == 0
3163 	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3164 					    &cc_use_insn)))
3165 	{
3166 	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3167 	  compare_code = simplify_compare_const (compare_code,
3168 						 GET_MODE (i2dest), op0, &op1);
3169 	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3170 	}
3171 
3172       /* Do the rest only if op1 is const0_rtx, which may be the
3173 	 result of simplification.  */
3174       if (op1 == const0_rtx)
3175 	{
3176 	  /* If a single use of the CC is found, prepare to modify it
3177 	     when SELECT_CC_MODE returns a new CC-class mode, or when
3178 	     the above simplify_compare_const() returned a new comparison
3179 	     operator.  undobuf.other_insn is assigned the CC use insn
3180 	     when modifying it.  */
3181 	  if (cc_use_loc)
3182 	    {
3183 #ifdef SELECT_CC_MODE
3184 	      machine_mode new_mode
3185 		= SELECT_CC_MODE (compare_code, op0, op1);
3186 	      if (new_mode != orig_compare_mode
3187 		  && can_change_dest_mode (SET_DEST (newpat),
3188 					   added_sets_2, new_mode))
3189 		{
3190 		  unsigned int regno = REGNO (newpat_dest);
3191 		  compare_mode = new_mode;
3192 		  if (regno < FIRST_PSEUDO_REGISTER)
3193 		    newpat_dest = gen_rtx_REG (compare_mode, regno);
3194 		  else
3195 		    {
3196 		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3197 		      newpat_dest = regno_reg_rtx[regno];
3198 		    }
3199 		}
3200 #endif
3201 	      /* Cases for modifying the CC-using comparison.  */
3202 	      if (compare_code != orig_compare_code
3203 		  /* ??? Do we need to verify the zero rtx?  */
3204 		  && XEXP (*cc_use_loc, 1) == const0_rtx)
3205 		{
3206 		  /* Replace cc_use_loc with entire new RTX.  */
3207 		  SUBST (*cc_use_loc,
3208 			 gen_rtx_fmt_ee (compare_code, compare_mode,
3209 					 newpat_dest, const0_rtx));
3210 		  undobuf.other_insn = cc_use_insn;
3211 		}
3212 	      else if (compare_mode != orig_compare_mode)
3213 		{
3214 		  /* Just replace the CC reg with a new mode.  */
3215 		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3216 		  undobuf.other_insn = cc_use_insn;
3217 		}
3218 	    }
3219 
3220 	  /* Now we modify the current newpat:
3221 	     First, SET_DEST(newpat) is updated if the CC mode has been
3222 	     altered. For targets without SELECT_CC_MODE, this should be
3223 	     optimized away.  */
3224 	  if (compare_mode != orig_compare_mode)
3225 	    SUBST (SET_DEST (newpat), newpat_dest);
3226 	  /* This is always done to propagate i2src into newpat.  */
3227 	  SUBST (SET_SRC (newpat),
3228 		 gen_rtx_COMPARE (compare_mode, op0, op1));
3229 	  /* Create new version of i2pat if needed; the below PARALLEL
3230 	     creation needs this to work correctly.  */
3231 	  if (! rtx_equal_p (i2src, op0))
3232 	    i2pat = gen_rtx_SET (i2dest, op0);
3233 	  i2_is_used = 1;
3234 	}
3235     }
3236 
3237   if (i2_is_used == 0)
3238     {
3239       /* It is possible that the source of I2 or I1 may be performing
3240 	 an unneeded operation, such as a ZERO_EXTEND of something
3241 	 that is known to have the high part zero.  Handle that case
3242 	 by letting subst look at the inner insns.
3243 
3244 	 Another way to do this would be to have a function that tries
3245 	 to simplify a single insn instead of merging two or more
3246 	 insns.  We don't do this because of the potential of infinite
3247 	 loops and because of the potential extra memory required.
3248 	 However, doing it the way we are is a bit of a kludge and
3249 	 doesn't catch all cases.
3250 
3251 	 But only do this if -fexpensive-optimizations since it slows
3252 	 things down and doesn't usually win.
3253 
3254 	 This is not done in the COMPARE case above because the
3255 	 unmodified I2PAT is used in the PARALLEL and so a pattern
3256 	 with a modified I2SRC would not match.  */
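      /* For instance (purely illustrative): if I2SRC were
	 (zero_extend:DI (subreg:SI (reg:DI 200) 0)) and the high part of
	 (reg:DI 200) is already known to be zero, letting subst simplify
	 I2SRC here can reduce it before it is substituted into I3.  */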
3257 
3258       if (flag_expensive_optimizations)
3259 	{
3260 	  /* Pass pc_rtx so no substitutions are done, just
3261 	     simplifications.  */
3262 	  if (i1)
3263 	    {
3264 	      subst_low_luid = DF_INSN_LUID (i1);
3265 	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3266 	    }
3267 
3268 	  subst_low_luid = DF_INSN_LUID (i2);
3269 	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3270 	}
3271 
3272       n_occurrences = 0;		/* `subst' counts here */
3273       subst_low_luid = DF_INSN_LUID (i2);
3274 
3275       /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3276 	 copy of I2SRC each time we substitute it, in order to avoid creating
3277 	 self-referential RTL when we will be substituting I1SRC for I1DEST
3278 	 later.  Likewise if I0 feeds into I2, either directly or indirectly
3279 	 through I1, and I0DEST is in I0SRC.  */
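      /* E.g. (hypothetically) if I1 is
	 (set (reg:SI 200) (plus:SI (reg:SI 200) (const_int 1)))
	 and I2SRC mentions (reg:SI 200), substituting a single shared copy of
	 I2SRC everywhere and then replacing (reg:SI 200) with I1SRC inside it
	 could leave an expression that contains itself.  */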
3280       newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3281 		      (i1_feeds_i2_n && i1dest_in_i1src)
3282 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3283 			  && i0dest_in_i0src));
3284       substed_i2 = 1;
3285 
3286       /* Record whether I2's body now appears within I3's body.  */
3287       i2_is_used = n_occurrences;
3288     }
3289 
3290   /* If we already got a failure, don't try to do more.  Otherwise, try to
3291      substitute I1 if we have it.  */
3292 
3293   if (i1 && GET_CODE (newpat) != CLOBBER)
3294     {
3295       /* Check that an autoincrement side-effect on I1 has not been lost.
3296 	 This happens if I1DEST is mentioned in I2 and dies there, and
3297 	 has disappeared from the new pattern.  */
3298       if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3299 	   && i1_feeds_i2_n
3300 	   && dead_or_set_p (i2, i1dest)
3301 	   && !reg_overlap_mentioned_p (i1dest, newpat))
3302 	   /* Before we can do this substitution, we must redo the test done
3303 	      above (see detailed comments there) that ensures I1DEST isn't
3304 	      mentioned in any SETs in NEWPAT that are field assignments.  */
3305 	  || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3306 				0, 0, 0))
3307 	{
3308 	  undo_all ();
3309 	  return 0;
3310 	}
3311 
3312       n_occurrences = 0;
3313       subst_low_luid = DF_INSN_LUID (i1);
3314 
3315       /* If the following substitution will modify I1SRC, make a copy of it
3316 	 for the case where it is substituted for I1DEST in I2PAT later.  */
3317       if (added_sets_2 && i1_feeds_i2_n)
3318 	i1src_copy = copy_rtx (i1src);
3319 
3320       /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3321 	 copy of I1SRC each time we substitute it, in order to avoid creating
3322 	 self-referential RTL when we will be substituting I0SRC for I0DEST
3323 	 later.  */
3324       newpat = subst (newpat, i1dest, i1src, 0, 0,
3325 		      i0_feeds_i1_n && i0dest_in_i0src);
3326       substed_i1 = 1;
3327 
3328       /* Record whether I1's body now appears within I3's body.  */
3329       i1_is_used = n_occurrences;
3330     }
3331 
3332   /* Likewise for I0 if we have it.  */
3333 
3334   if (i0 && GET_CODE (newpat) != CLOBBER)
3335     {
3336       if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3337 	   && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3338 	       || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3339 	   && !reg_overlap_mentioned_p (i0dest, newpat))
3340 	  || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3341 				0, 0, 0))
3342 	{
3343 	  undo_all ();
3344 	  return 0;
3345 	}
3346 
3347       /* If the following substitution will modify I0SRC, make a copy of it
3348 	 for the case where it is substituted for I0DEST in I1PAT later.  */
3349       if (added_sets_1 && i0_feeds_i1_n)
3350 	i0src_copy = copy_rtx (i0src);
3351       /* And a copy for I0DEST in I2PAT substitution.  */
3352       if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3353 			   || (i0_feeds_i2_n)))
3354 	i0src_copy2 = copy_rtx (i0src);
3355 
3356       n_occurrences = 0;
3357       subst_low_luid = DF_INSN_LUID (i0);
3358       newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3359       substed_i0 = 1;
3360     }
3361 
3362   /* Fail if an autoincrement side-effect has been duplicated.  Be careful
3363      to count all the ways that I2SRC and I1SRC can be used.  */
3364   if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3365        && i2_is_used + added_sets_2 > 1)
3366       || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3367 	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3368 	      > 1))
3369       || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3370 	  && (n_occurrences + added_sets_0
3371 	      + (added_sets_1 && i0_feeds_i1_n)
3372 	      + (added_sets_2 && i0_feeds_i2_n)
3373 	      > 1))
3374       /* Fail if we tried to make a new register.  */
3375       || max_reg_num () != maxreg
3376       /* Fail if we couldn't do something and have a CLOBBER.  */
3377       || GET_CODE (newpat) == CLOBBER
3378       /* Fail if this new pattern is a MULT and we didn't have one before
3379 	 at the outer level.  */
3380       || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3381 	  && ! have_mult))
3382     {
3383       undo_all ();
3384       return 0;
3385     }
3386 
3387   /* If the actions of the earlier insns must be kept
3388      in addition to substituting them into the latest one,
3389      we must make a new PARALLEL for the latest insn
3390      to hold the additional SETs.  */
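  /* Schematically, a simple NEWPAT
	(set (reg:SI 200) <expr>)
     with the SET from I2 still needed becomes
	(parallel [(set (reg:SI 200) <expr>)
		   (set (reg:SI 201) <i2src>)])
     where the extra elements are filled in from the end of the vector.  */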
3391 
3392   if (added_sets_0 || added_sets_1 || added_sets_2)
3393     {
3394       int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3395       combine_extras++;
3396 
3397       if (GET_CODE (newpat) == PARALLEL)
3398 	{
3399 	  rtvec old = XVEC (newpat, 0);
3400 	  total_sets = XVECLEN (newpat, 0) + extra_sets;
3401 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3402 	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3403 		  sizeof (old->elem[0]) * old->num_elem);
3404 	}
3405       else
3406 	{
3407 	  rtx old = newpat;
3408 	  total_sets = 1 + extra_sets;
3409 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3410 	  XVECEXP (newpat, 0, 0) = old;
3411 	}
3412 
3413       if (added_sets_0)
3414 	XVECEXP (newpat, 0, --total_sets) = i0pat;
3415 
3416       if (added_sets_1)
3417 	{
3418 	  rtx t = i1pat;
3419 	  if (i0_feeds_i1_n)
3420 	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3421 
3422 	  XVECEXP (newpat, 0, --total_sets) = t;
3423 	}
3424       if (added_sets_2)
3425 	{
3426 	  rtx t = i2pat;
3427 	  if (i1_feeds_i2_n)
3428 	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3429 		       i0_feeds_i1_n && i0dest_in_i0src);
3430 	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3431 	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3432 
3433 	  XVECEXP (newpat, 0, --total_sets) = t;
3434 	}
3435     }
3436 
3437  validate_replacement:
3438 
3439   /* Note which hard regs this insn has as inputs.  */
3440   mark_used_regs_combine (newpat);
3441 
3442   /* If recog_for_combine fails, it strips existing clobbers.  If we'll
3443      consider splitting this pattern, we might need these clobbers.  */
3444   if (i1 && GET_CODE (newpat) == PARALLEL
3445       && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3446     {
3447       int len = XVECLEN (newpat, 0);
3448 
3449       newpat_vec_with_clobbers = rtvec_alloc (len);
3450       for (i = 0; i < len; i++)
3451 	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3452     }
3453 
3454   /* We have recognized nothing yet.  */
3455   insn_code_number = -1;
3456 
3457   /* See if this is a PARALLEL of two SETs where one SET's destination is
3458      a register that is unused and this isn't marked as an instruction that
3459      might trap in an EH region.  In that case, we just need the other SET.
3460      We prefer this over the PARALLEL.
3461 
3462      This can occur when simplifying a divmod insn.  We *must* test for this
3463      case here because the code below that splits two independent SETs doesn't
3464      handle this case correctly when it updates the register status.
3465 
3466      It's pointless doing this if we originally had two sets, one from
3467      i3, and one from i2.  Combining then splitting the parallel results
3468      in the original i2 again plus an invalid insn (which we delete).
3469      The net effect is only to move instructions around, which makes
3470      debug info less accurate.  */
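  /* A typical (hypothetical) instance is a divmod-style PARALLEL such as
	(parallel [(set (reg:SI 200) (div:SI (reg:SI 202) (reg:SI 203)))
		   (set (reg:SI 201) (mod:SI (reg:SI 202) (reg:SI 203)))])
     where I3 carries a REG_UNUSED note for (reg:SI 201); only the DIV SET
     is kept and recognized on its own.  */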
3471 
3472   if (!(added_sets_2 && i1 == 0)
3473       && is_parallel_of_n_reg_sets (newpat, 2)
3474       && asm_noperands (newpat) < 0)
3475     {
3476       rtx set0 = XVECEXP (newpat, 0, 0);
3477       rtx set1 = XVECEXP (newpat, 0, 1);
3478       rtx oldpat = newpat;
3479 
3480       if (((REG_P (SET_DEST (set1))
3481 	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3482 	   || (GET_CODE (SET_DEST (set1)) == SUBREG
3483 	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3484 	  && insn_nothrow_p (i3)
3485 	  && !side_effects_p (SET_SRC (set1)))
3486 	{
3487 	  newpat = set0;
3488 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3489 	}
3490 
3491       else if (((REG_P (SET_DEST (set0))
3492 		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3493 		|| (GET_CODE (SET_DEST (set0)) == SUBREG
3494 		    && find_reg_note (i3, REG_UNUSED,
3495 				      SUBREG_REG (SET_DEST (set0)))))
3496 	       && insn_nothrow_p (i3)
3497 	       && !side_effects_p (SET_SRC (set0)))
3498 	{
3499 	  newpat = set1;
3500 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3501 
3502 	  if (insn_code_number >= 0)
3503 	    changed_i3_dest = 1;
3504 	}
3505 
3506       if (insn_code_number < 0)
3507 	newpat = oldpat;
3508     }
3509 
3510   /* Is the result of combination a valid instruction?  */
3511   if (insn_code_number < 0)
3512     insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3513 
3514   /* If we were combining three insns and the result is a simple SET
3515      with no ASM_OPERANDS that wasn't recognized, try to split it into two
3516      insns.  There are two ways to do this.  It can be split using a
3517      machine-specific method (like when you have an addition of a large
3518      constant) or by combine in the function find_split_point.  */
3519 
3520   if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3521       && asm_noperands (newpat) < 0)
3522     {
3523       rtx parallel, *split;
3524       rtx_insn *m_split_insn;
3525 
3526       /* See if the MD file can split NEWPAT.  If it can't, see if letting it
3527 	 use I2DEST as a scratch register will help.  In the latter case,
3528 	 convert I2DEST to the mode of the source of NEWPAT if we can.  */
3529 
3530       m_split_insn = combine_split_insns (newpat, i3);
3531 
3532       /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3533 	 inputs of NEWPAT.  */
3534 
3535       /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3536 	 possible to try that as a scratch reg.  This would require adding
3537 	 more code to make it work though.  */
3538 
3539       if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3540 	{
3541 	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3542 
3543 	  /* First try to split using the original register as a
3544 	     scratch register.  */
3545 	  parallel = gen_rtx_PARALLEL (VOIDmode,
3546 				       gen_rtvec (2, newpat,
3547 						  gen_rtx_CLOBBER (VOIDmode,
3548 								   i2dest)));
3549 	  m_split_insn = combine_split_insns (parallel, i3);
3550 
3551 	  /* If that didn't work, try changing the mode of I2DEST if
3552 	     we can.  */
3553 	  if (m_split_insn == 0
3554 	      && new_mode != GET_MODE (i2dest)
3555 	      && new_mode != VOIDmode
3556 	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3557 	    {
3558 	      machine_mode old_mode = GET_MODE (i2dest);
3559 	      rtx ni2dest;
3560 
3561 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3562 		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3563 	      else
3564 		{
3565 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3566 		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
3567 		}
3568 
3569 	      parallel = (gen_rtx_PARALLEL
3570 			  (VOIDmode,
3571 			   gen_rtvec (2, newpat,
3572 				      gen_rtx_CLOBBER (VOIDmode,
3573 						       ni2dest))));
3574 	      m_split_insn = combine_split_insns (parallel, i3);
3575 
3576 	      if (m_split_insn == 0
3577 		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3578 		{
3579 		  struct undo *buf;
3580 
3581 		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3582 		  buf = undobuf.undos;
3583 		  undobuf.undos = buf->next;
3584 		  buf->next = undobuf.frees;
3585 		  undobuf.frees = buf;
3586 		}
3587 	    }
3588 
3589 	  i2scratch = m_split_insn != 0;
3590 	}
3591 
3592       /* If recog_for_combine has discarded clobbers, try to use them
3593 	 again for the split.  */
3594       if (m_split_insn == 0 && newpat_vec_with_clobbers)
3595 	{
3596 	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3597 	  m_split_insn = combine_split_insns (parallel, i3);
3598 	}
3599 
3600       if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3601 	{
3602 	  rtx m_split_pat = PATTERN (m_split_insn);
3603 	  insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3604 	  if (insn_code_number >= 0)
3605 	    newpat = m_split_pat;
3606 	}
3607       else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3608 	       && (next_nonnote_nondebug_insn (i2) == i3
3609 		   || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3610 	{
3611 	  rtx i2set, i3set;
3612 	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3613 	  newi2pat = PATTERN (m_split_insn);
3614 
3615 	  i3set = single_set (NEXT_INSN (m_split_insn));
3616 	  i2set = single_set (m_split_insn);
3617 
3618 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3619 
3620 	  /* If I2 or I3 has multiple SETs, we won't know how to track
3621 	     register status, so don't use these insns.  If I2's destination
3622 	     is used between I2 and I3, we also can't use these insns.  */
3623 
3624 	  if (i2_code_number >= 0 && i2set && i3set
3625 	      && (next_nonnote_nondebug_insn (i2) == i3
3626 		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3627 	    insn_code_number = recog_for_combine (&newi3pat, i3,
3628 						  &new_i3_notes);
3629 	  if (insn_code_number >= 0)
3630 	    newpat = newi3pat;
3631 
3632 	  /* It is possible that both insns now set the destination of I3.
3633 	     If so, we must show an extra use of it.  */
3634 
3635 	  if (insn_code_number >= 0)
3636 	    {
3637 	      rtx new_i3_dest = SET_DEST (i3set);
3638 	      rtx new_i2_dest = SET_DEST (i2set);
3639 
3640 	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3641 		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3642 		     || GET_CODE (new_i3_dest) == SUBREG)
3643 		new_i3_dest = XEXP (new_i3_dest, 0);
3644 
3645 	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3646 		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3647 		     || GET_CODE (new_i2_dest) == SUBREG)
3648 		new_i2_dest = XEXP (new_i2_dest, 0);
3649 
3650 	      if (REG_P (new_i3_dest)
3651 		  && REG_P (new_i2_dest)
3652 		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3653 		  && REGNO (new_i2_dest) < reg_n_sets_max)
3654 		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3655 	    }
3656 	}
3657 
3658       /* If we can split it and use I2DEST, go ahead and see if that
3659 	 helps things be recognized.  Verify that none of the registers
3660 	 are set between I2 and I3.  */
3661       if (insn_code_number < 0
3662 	  && (split = find_split_point (&newpat, i3, false)) != 0
3663 	  && (!HAVE_cc0 || REG_P (i2dest))
3664 	  /* We need I2DEST in the proper mode.  If it is a hard register
3665 	     or the only use of a pseudo, we can change its mode.
3666 	     Make sure we don't change a hard register to have a mode that
3667 	     isn't valid for it, or change the number of registers.  */
3668 	  && (GET_MODE (*split) == GET_MODE (i2dest)
3669 	      || GET_MODE (*split) == VOIDmode
3670 	      || can_change_dest_mode (i2dest, added_sets_2,
3671 				       GET_MODE (*split)))
3672 	  && (next_nonnote_nondebug_insn (i2) == i3
3673 	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3674 	  /* We can't overwrite I2DEST if its value is still used by
3675 	     NEWPAT.  */
3676 	  && ! reg_referenced_p (i2dest, newpat))
3677 	{
3678 	  rtx newdest = i2dest;
3679 	  enum rtx_code split_code = GET_CODE (*split);
3680 	  machine_mode split_mode = GET_MODE (*split);
3681 	  bool subst_done = false;
3682 	  newi2pat = NULL_RTX;
3683 
3684 	  i2scratch = true;
3685 
3686 	  /* *SPLIT may be part of I2SRC, so make sure we have the
3687 	     original expression around for later debug processing.
3688 	     We should not need I2SRC any more in other cases.  */
3689 	  if (MAY_HAVE_DEBUG_INSNS)
3690 	    i2src = copy_rtx (i2src);
3691 	  else
3692 	    i2src = NULL;
3693 
3694 	  /* Get NEWDEST as a register in the proper mode.  We have already
3695 	     validated that we can do this.  */
3696 	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3697 	    {
3698 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3699 		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3700 	      else
3701 		{
3702 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3703 		  newdest = regno_reg_rtx[REGNO (i2dest)];
3704 		}
3705 	    }
3706 
3707 	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3708 	     an ASHIFT.  This can occur if it was inside a PLUS and hence
3709 	     appeared to be a memory address.  This is a kludge.  */
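	  /* E.g. (mult:SI (reg:SI 200) (const_int 8)) is rewritten as
	     (ashift:SI (reg:SI 200) (const_int 3)), since exact_log2 (8) == 3.  */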
3710 	  if (split_code == MULT
3711 	      && CONST_INT_P (XEXP (*split, 1))
3712 	      && INTVAL (XEXP (*split, 1)) > 0
3713 	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3714 	    {
3715 	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
3716 					     XEXP (*split, 0), GEN_INT (i)));
3717 	      /* Update split_code because we may not have a multiply
3718 		 anymore.  */
3719 	      split_code = GET_CODE (*split);
3720 	    }
3721 
3722 	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
3723 	  if (split_code == PLUS
3724 	      && GET_CODE (XEXP (*split, 0)) == MULT
3725 	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3726 	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3727 	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3728 	    {
3729 	      rtx nsplit = XEXP (*split, 0);
3730 	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3731 					     XEXP (nsplit, 0), GEN_INT (i)));
3732 	      /* Update split_code because we may not have a multiply
3733 		 anymore.  */
3734 	      split_code = GET_CODE (*split);
3735 	    }
3736 
3737 #ifdef INSN_SCHEDULING
3738 	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3739 	     be written as a ZERO_EXTEND.  */
3740 	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3741 	    {
3742 	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3743 		 what it really is.  */
3744 	      if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3745 		  == SIGN_EXTEND)
3746 		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3747 						    SUBREG_REG (*split)));
3748 	      else
3749 		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3750 						    SUBREG_REG (*split)));
3751 	    }
3752 #endif
3753 
3754 	  /* Attempt to split binary operators using arithmetic identities.  */
3755 	  if (BINARY_P (SET_SRC (newpat))
3756 	      && split_mode == GET_MODE (SET_SRC (newpat))
3757 	      && ! side_effects_p (SET_SRC (newpat)))
3758 	    {
3759 	      rtx setsrc = SET_SRC (newpat);
3760 	      machine_mode mode = GET_MODE (setsrc);
3761 	      enum rtx_code code = GET_CODE (setsrc);
3762 	      rtx src_op0 = XEXP (setsrc, 0);
3763 	      rtx src_op1 = XEXP (setsrc, 1);
3764 
3765 	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
3766 	      if (rtx_equal_p (src_op0, src_op1))
3767 		{
3768 		  newi2pat = gen_rtx_SET (newdest, src_op0);
3769 		  SUBST (XEXP (setsrc, 0), newdest);
3770 		  SUBST (XEXP (setsrc, 1), newdest);
3771 		  subst_done = true;
3772 		}
3773 	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
3774 	      else if ((code == PLUS || code == MULT)
3775 		       && GET_CODE (src_op0) == code
3776 		       && GET_CODE (XEXP (src_op0, 0)) == code
3777 		       && (INTEGRAL_MODE_P (mode)
3778 			   || (FLOAT_MODE_P (mode)
3779 			       && flag_unsafe_math_optimizations)))
3780 		{
3781 		  rtx p = XEXP (XEXP (src_op0, 0), 0);
3782 		  rtx q = XEXP (XEXP (src_op0, 0), 1);
3783 		  rtx r = XEXP (src_op0, 1);
3784 		  rtx s = src_op1;
3785 
3786 		  /* Split both "((X op Y) op X) op Y" and
3787 		     "((X op Y) op Y) op X" as "T op T" where T is
3788 		     "X op Y".  */
3789 		  if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3790 		       || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3791 		    {
3792 		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3793 		      SUBST (XEXP (setsrc, 0), newdest);
3794 		      SUBST (XEXP (setsrc, 1), newdest);
3795 		      subst_done = true;
3796 		    }
3797 		  /* Split "((X op X) op Y) op Y" as "T op T" where
3798 		     T is "X op Y".  */
3799 		  else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3800 		    {
3801 		      rtx tmp = simplify_gen_binary (code, mode, p, r);
3802 		      newi2pat = gen_rtx_SET (newdest, tmp);
3803 		      SUBST (XEXP (setsrc, 0), newdest);
3804 		      SUBST (XEXP (setsrc, 1), newdest);
3805 		      subst_done = true;
3806 		    }
3807 		}
3808 	    }
3809 
3810 	  if (!subst_done)
3811 	    {
3812 	      newi2pat = gen_rtx_SET (newdest, *split);
3813 	      SUBST (*split, newdest);
3814 	    }
3815 
3816 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3817 
3818 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
3819 	     Make sure NEWPAT does not depend on the clobbered regs.  */
3820 	  if (GET_CODE (newi2pat) == PARALLEL)
3821 	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3822 	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3823 		{
3824 		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3825 		  if (reg_overlap_mentioned_p (reg, newpat))
3826 		    {
3827 		      undo_all ();
3828 		      return 0;
3829 		    }
3830 		}
3831 
3832 	  /* If the split point was a MULT and we didn't have one before,
3833 	     don't use one now.  */
3834 	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3835 	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3836 	}
3837     }
3838 
3839   /* Check for a case where we loaded from memory in a narrow mode and
3840      then sign extended it, but we need both registers.  In that case,
3841      we have a PARALLEL with both loads from the same memory location.
3842      We can split this into a load from memory followed by a register-register
3843      copy.  This saves at least one insn, more if register allocation can
3844      eliminate the copy.
3845 
3846      We cannot do this if the destination of the first assignment is a
3847      condition code register or cc0.  We eliminate this case by making sure
3848      the SET_DEST and SET_SRC have the same mode.
3849 
3850      We cannot do this if the destination of the second assignment is
3851      a register that we have already assumed is zero-extended.  Similarly
3852      for a SUBREG of such a register.  */
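  /* Illustratively (register numbers invented), a NEWPAT of the form
	(parallel [(set (reg:SI 200) (sign_extend:SI (mem:HI (reg:SI 203))))
		   (set (reg:HI 201) (mem:HI (reg:SI 203)))])
     is split so that the new I2 performs the extending load and the new I3
     merely copies the low part of (reg:SI 200) into (reg:HI 201).  */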
3853 
3854   else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3855 	   && GET_CODE (newpat) == PARALLEL
3856 	   && XVECLEN (newpat, 0) == 2
3857 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3858 	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3859 	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3860 	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3861 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3862 	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3863 			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3864 	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3865 				   DF_INSN_LUID (i2))
3866 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3867 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3868 	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3869 		 (REG_P (temp_expr)
3870 		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3871 		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3872 		  && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3873 		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
3874 		      != GET_MODE_MASK (word_mode))))
3875 	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3876 		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3877 		     (REG_P (temp_expr)
3878 		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3879 		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3880 		      && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3881 		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
3882 			  != GET_MODE_MASK (word_mode)))))
3883 	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3884 					 SET_SRC (XVECEXP (newpat, 0, 1)))
3885 	   && ! find_reg_note (i3, REG_UNUSED,
3886 			       SET_DEST (XVECEXP (newpat, 0, 0))))
3887     {
3888       rtx ni2dest;
3889 
3890       newi2pat = XVECEXP (newpat, 0, 0);
3891       ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3892       newpat = XVECEXP (newpat, 0, 1);
3893       SUBST (SET_SRC (newpat),
3894 	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3895       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3896 
3897       if (i2_code_number >= 0)
3898 	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3899 
3900       if (insn_code_number >= 0)
3901 	swap_i2i3 = 1;
3902     }
3903 
3904   /* Similarly, check for a case where we have a PARALLEL of two independent
3905      SETs but we started with three insns.  In this case, we can do the sets
3906      as two separate insns.  This case occurs when some SET allows two
3907      other insns to combine, but the destination of that SET is still live.
3908 
3909      Also do this if we started with two insns and (at least) one of the
3910      resulting sets is a noop; this noop will be deleted later.  */
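  /* For example (hypothetical), a NEWPAT of
	(parallel [(set (reg:SI 200) (plus:SI (reg:SI 202) (const_int 4)))
		   (set (reg:SI 201) (mem:SI (reg:SI 202)))])
     with independent SETs can have one SET become the new I2 and the other
     the new I3, subject to the ordering checks below.  */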
3911 
3912   else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3913 	   && GET_CODE (newpat) == PARALLEL
3914 	   && XVECLEN (newpat, 0) == 2
3915 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3916 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3917 	   && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3918 		  || set_noop_p (XVECEXP (newpat, 0, 1)))
3919 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3920 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3921 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3922 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3923 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3924 				  XVECEXP (newpat, 0, 0))
3925 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3926 				  XVECEXP (newpat, 0, 1))
3927 	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3928 		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3929     {
3930       rtx set0 = XVECEXP (newpat, 0, 0);
3931       rtx set1 = XVECEXP (newpat, 0, 1);
3932 
3933       /* Normally, it doesn't matter which of the two is done first,
3934 	 but the one that references cc0 can't be the second, and
3935 	 one which uses any regs/memory set in between i2 and i3 can't
3936 	 be first.  The PARALLEL might also have been pre-existing in i3,
3937 	 so we need to make sure that we won't wrongly hoist a SET to i2
3938 	 that would conflict with a death note present in there.  */
3939       if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3940 	  && !(REG_P (SET_DEST (set1))
3941 	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3942 	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
3943 	       && find_reg_note (i2, REG_DEAD,
3944 				 SUBREG_REG (SET_DEST (set1))))
3945 	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
3946 	  /* If I3 is a jump, ensure that set0 is a jump so that
3947 	     we do not create invalid RTL.  */
3948 	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3949 	 )
3950 	{
3951 	  newi2pat = set1;
3952 	  newpat = set0;
3953 	}
3954       else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3955 	       && !(REG_P (SET_DEST (set0))
3956 		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3957 	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
3958 		    && find_reg_note (i2, REG_DEAD,
3959 				      SUBREG_REG (SET_DEST (set0))))
3960 	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
3961 	       /* If I3 is a jump, ensure that set1 is a jump so that
3962 		  we do not create invalid RTL.  */
3963 	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
3964 	      )
3965 	{
3966 	  newi2pat = set0;
3967 	  newpat = set1;
3968 	}
3969       else
3970 	{
3971 	  undo_all ();
3972 	  return 0;
3973 	}
3974 
3975       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3976 
3977       if (i2_code_number >= 0)
3978 	{
3979 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
3980 	     Make sure NEWPAT does not depend on the clobbered regs.  */
3981 	  if (GET_CODE (newi2pat) == PARALLEL)
3982 	    {
3983 	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3984 		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3985 		  {
3986 		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3987 		    if (reg_overlap_mentioned_p (reg, newpat))
3988 		      {
3989 			undo_all ();
3990 			return 0;
3991 		      }
3992 		  }
3993 	    }
3994 
3995 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3996 	}
3997     }
3998 
3999   /* If it still isn't recognized, fail and change things back the way they
4000      were.  */
4001   if ((insn_code_number < 0
4002        /* Is the result a reasonable ASM_OPERANDS?  */
4003        && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4004     {
4005       undo_all ();
4006       return 0;
4007     }
4008 
4009   /* If we had to change another insn, make sure it is valid also.  */
4010   if (undobuf.other_insn)
4011     {
4012       CLEAR_HARD_REG_SET (newpat_used_regs);
4013 
4014       other_pat = PATTERN (undobuf.other_insn);
4015       other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4016 					     &new_other_notes);
4017 
4018       if (other_code_number < 0 && ! check_asm_operands (other_pat))
4019 	{
4020 	  undo_all ();
4021 	  return 0;
4022 	}
4023     }
4024 
4025   /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4026      they are adjacent to each other or not.  */
4027   if (HAVE_cc0)
4028     {
4029       rtx_insn *p = prev_nonnote_insn (i3);
4030       if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4031 	  && sets_cc0_p (newi2pat))
4032 	{
4033 	  undo_all ();
4034 	  return 0;
4035 	}
4036     }
4037 
4038   /* Only allow this combination if insn_rtx_cost reports that the
4039      replacement instructions are cheaper than the originals.  */
4040   if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4041     {
4042       undo_all ();
4043       return 0;
4044     }
4045 
4046   if (MAY_HAVE_DEBUG_INSNS)
4047     {
4048       struct undo *undo;
4049 
4050       for (undo = undobuf.undos; undo; undo = undo->next)
4051 	if (undo->kind == UNDO_MODE)
4052 	  {
4053 	    rtx reg = *undo->where.r;
4054 	    machine_mode new_mode = GET_MODE (reg);
4055 	    machine_mode old_mode = undo->old_contents.m;
4056 
4057 	    /* Temporarily revert mode back.  */
4058 	    adjust_reg_mode (reg, old_mode);
4059 
4060 	    if (reg == i2dest && i2scratch)
4061 	      {
4062 		/* If we used i2dest as a scratch register with a
4063 		   different mode, substitute it for the original
4064 		   i2src while its original mode is temporarily
4065 		   restored, and then clear i2scratch so that we don't
4066 		   do it again later.  */
4067 		propagate_for_debug (i2, last_combined_insn, reg, i2src,
4068 				     this_basic_block);
4069 		i2scratch = false;
4070 		/* Put back the new mode.  */
4071 		adjust_reg_mode (reg, new_mode);
4072 	      }
4073 	    else
4074 	      {
4075 		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4076 		rtx_insn *first, *last;
4077 
4078 		if (reg == i2dest)
4079 		  {
4080 		    first = i2;
4081 		    last = last_combined_insn;
4082 		  }
4083 		else
4084 		  {
4085 		    first = i3;
4086 		    last = undobuf.other_insn;
4087 		    gcc_assert (last);
4088 		    if (DF_INSN_LUID (last)
4089 			< DF_INSN_LUID (last_combined_insn))
4090 		      last = last_combined_insn;
4091 		  }
4092 
4093 		/* We're dealing with a reg that changed mode but not
4094 		   meaning, so we want to turn it into a subreg for
4095 		   the new mode.  However, because of REG sharing and
4096 		   because its mode had already changed, we have to do
4097 		   it in two steps.  First, replace any debug uses of
4098 		   reg, with its original mode temporarily restored,
4099 		   with this copy we have created; then, replace the
4100 		   copy with the SUBREG of the original shared reg,
4101 		   once again changed to the new mode.  */
4102 		propagate_for_debug (first, last, reg, tempreg,
4103 				     this_basic_block);
4104 		adjust_reg_mode (reg, new_mode);
4105 		propagate_for_debug (first, last, tempreg,
4106 				     lowpart_subreg (old_mode, reg, new_mode),
4107 				     this_basic_block);
4108 	      }
4109 	  }
4110     }
4111 
4112   /* If we will be able to accept this, we have made a
4113      change to the destination of I3.  This requires us to
4114      do a few adjustments.  */
4115 
4116   if (changed_i3_dest)
4117     {
4118       PATTERN (i3) = newpat;
4119       adjust_for_new_dest (i3);
4120     }
4121 
4122   /* We now know that we can do this combination.  Merge the insns and
4123      update the status of registers and LOG_LINKS.  */
4124 
4125   if (undobuf.other_insn)
4126     {
4127       rtx note, next;
4128 
4129       PATTERN (undobuf.other_insn) = other_pat;
4130 
4131       /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4132 	 ensure that they are still valid.  Then add any non-duplicate
4133 	 notes added by recog_for_combine.  */
4134       for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4135 	{
4136 	  next = XEXP (note, 1);
4137 
4138 	  if ((REG_NOTE_KIND (note) == REG_DEAD
4139 	       && !reg_referenced_p (XEXP (note, 0),
4140 				     PATTERN (undobuf.other_insn)))
4141 	      ||(REG_NOTE_KIND (note) == REG_UNUSED
4142 		 && !reg_set_p (XEXP (note, 0),
4143 				PATTERN (undobuf.other_insn))))
4144 	    remove_note (undobuf.other_insn, note);
4145 	}
4146 
4147       distribute_notes  (new_other_notes, undobuf.other_insn,
4148 			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4149 			NULL_RTX);
4150     }
4151 
4152   if (swap_i2i3)
4153     {
4154       rtx_insn *insn;
4155       struct insn_link *link;
4156       rtx ni2dest;
4157 
4158       /* I3 now uses what used to be its destination and which is now
4159 	 I2's destination.  This requires us to do a few adjustments.  */
4160       PATTERN (i3) = newpat;
4161       adjust_for_new_dest (i3);
4162 
4163       /* We need a LOG_LINK from I3 to I2.  But we used to have one,
4164 	 so we still will.
4165 
4166 	 However, some later insn might be using I2's dest and have
4167 	 a LOG_LINK pointing at I3.  We must remove this link.
4168 	 The simplest way to remove the link is to point it at I1,
4169 	 which we know will be a NOTE.  */
4170 
4171       /* newi2pat is usually a SET here; however, recog_for_combine might
4172 	 have added some clobbers.  */
4173       if (GET_CODE (newi2pat) == PARALLEL)
4174 	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4175       else
4176 	ni2dest = SET_DEST (newi2pat);
4177 
4178       for (insn = NEXT_INSN (i3);
4179 	   insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4180 		    || insn != BB_HEAD (this_basic_block->next_bb));
4181 	   insn = NEXT_INSN (insn))
4182 	{
4183 	  if (NONDEBUG_INSN_P (insn)
4184 	      && reg_referenced_p (ni2dest, PATTERN (insn)))
4185 	    {
4186 	      FOR_EACH_LOG_LINK (link, insn)
4187 		if (link->insn == i3)
4188 		  link->insn = i1;
4189 
4190 	      break;
4191 	    }
4192 	}
4193     }
4194 
4195   {
4196     rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4197     struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4198     rtx midnotes = 0;
4199     int from_luid;
4200     /* Compute which registers we expect to eliminate.  newi2pat may be setting
4201        either i3dest or i2dest, so we must check it.  */
4202     rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4203 		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4204 		   || !i2dest_killed
4205 		   ? 0 : i2dest);
4206     /* For i1, we need to compute both local elimination and global
4207        elimination information with respect to newi2pat because i1dest
4208        may be the same as i3dest, in which case newi2pat may be setting
4209        i1dest.  Global information is used when distributing REG_DEAD
4210        note for i2 and i3, in which case it does matter if newi2pat sets
4211        i1dest or not.
4212 
4213        Local information is used when distributing REG_DEAD note for i1,
4214        in which case it doesn't matter if newi2pat sets i1dest or not.
4215        See PR62151, if we have four insns combination:
4216 	   i0: r0 <- i0src
4217 	   i1: r1 <- i1src (using r0)
4218 		     REG_DEAD (r0)
4219 	   i2: r0 <- i2src (using r1)
4220 	   i3: r3 <- i3src (using r0)
4221 	   ix: using r0
4222        From i1's point of view, r0 is eliminated, no matter if it is set
4223        by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
4224        should be discarded.
4225 
4226        Note local information only affects cases in forms like "I1->I2->I3",
4227        "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
4228        "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4229        i0dest anyway.  */
4230     rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4231 			 || !i1dest_killed
4232 			 ? 0 : i1dest);
4233     rtx elim_i1 = (local_elim_i1 == 0
4234 		   || (newi2pat && reg_set_p (i1dest, newi2pat))
4235 		   ? 0 : i1dest);
4236     /* Same case as i1.  */
4237     rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4238 			 ? 0 : i0dest);
4239     rtx elim_i0 = (local_elim_i0 == 0
4240 		   || (newi2pat && reg_set_p (i0dest, newi2pat))
4241 		   ? 0 : i0dest);
4242 
4243     /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4244        clear them.  */
4245     i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4246     i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4247     if (i1)
4248       i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4249     if (i0)
4250       i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4251 
4252     /* Ensure that we do not have something that should not be shared but
4253        occurs multiple times in the new insns.  Check this by first
4254        resetting all the `used' flags and then copying anything that is shared.  */
4255 
4256     reset_used_flags (i3notes);
4257     reset_used_flags (i2notes);
4258     reset_used_flags (i1notes);
4259     reset_used_flags (i0notes);
4260     reset_used_flags (newpat);
4261     reset_used_flags (newi2pat);
4262     if (undobuf.other_insn)
4263       reset_used_flags (PATTERN (undobuf.other_insn));
4264 
4265     i3notes = copy_rtx_if_shared (i3notes);
4266     i2notes = copy_rtx_if_shared (i2notes);
4267     i1notes = copy_rtx_if_shared (i1notes);
4268     i0notes = copy_rtx_if_shared (i0notes);
4269     newpat = copy_rtx_if_shared (newpat);
4270     newi2pat = copy_rtx_if_shared (newi2pat);
4271     if (undobuf.other_insn)
4272       reset_used_flags (PATTERN (undobuf.other_insn));
4273 
4274     INSN_CODE (i3) = insn_code_number;
4275     PATTERN (i3) = newpat;
4276 
4277     if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4278       {
4279 	rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4280 
4281 	reset_used_flags (call_usage);
4282 	call_usage = copy_rtx (call_usage);
4283 
4284 	if (substed_i2)
4285 	  {
4286 	    /* I2SRC must still be meaningful at this point.  Some splitting
4287 	       operations can invalidate I2SRC, but those operations do not
4288 	       apply to calls.  */
4289 	    gcc_assert (i2src);
4290 	    replace_rtx (call_usage, i2dest, i2src);
4291 	  }
4292 
4293 	if (substed_i1)
4294 	  replace_rtx (call_usage, i1dest, i1src);
4295 	if (substed_i0)
4296 	  replace_rtx (call_usage, i0dest, i0src);
4297 
4298 	CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4299       }
4300 
4301     if (undobuf.other_insn)
4302       INSN_CODE (undobuf.other_insn) = other_code_number;
4303 
4304     /* We had one special case above where I2 had more than one set and
4305        we replaced a destination of one of those sets with the destination
4306        of I3.  In that case, we have to update LOG_LINKS of insns later
4307        in this basic block.  Note that this (expensive) case is rare.
4308 
4309        Also, in this case, we must pretend that all REG_NOTEs for I2
4310        actually came from I3, so that REG_UNUSED notes from I2 will be
4311        properly handled.  */
4312 
4313     if (i3_subst_into_i2)
4314       {
4315 	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4316 	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4317 	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4318 	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4319 	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4320 	      && ! find_reg_note (i2, REG_UNUSED,
4321 				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4322 	    for (temp_insn = NEXT_INSN (i2);
4323 		 temp_insn
4324 		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4325 		     || BB_HEAD (this_basic_block) != temp_insn);
4326 		 temp_insn = NEXT_INSN (temp_insn))
4327 	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4328 		FOR_EACH_LOG_LINK (link, temp_insn)
4329 		  if (link->insn == i2)
4330 		    link->insn = i3;
4331 
4332 	if (i3notes)
4333 	  {
4334 	    rtx link = i3notes;
4335 	    while (XEXP (link, 1))
4336 	      link = XEXP (link, 1);
4337 	    XEXP (link, 1) = i2notes;
4338 	  }
4339 	else
4340 	  i3notes = i2notes;
4341 	i2notes = 0;
4342       }
4343 
4344     LOG_LINKS (i3) = NULL;
4345     REG_NOTES (i3) = 0;
4346     LOG_LINKS (i2) = NULL;
4347     REG_NOTES (i2) = 0;
4348 
4349     if (newi2pat)
4350       {
4351 	if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4352 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4353 			       this_basic_block);
4354 	INSN_CODE (i2) = i2_code_number;
4355 	PATTERN (i2) = newi2pat;
4356       }
4357     else
4358       {
4359 	if (MAY_HAVE_DEBUG_INSNS && i2src)
4360 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4361 			       this_basic_block);
4362 	SET_INSN_DELETED (i2);
4363       }
4364 
4365     if (i1)
4366       {
4367 	LOG_LINKS (i1) = NULL;
4368 	REG_NOTES (i1) = 0;
4369 	if (MAY_HAVE_DEBUG_INSNS)
4370 	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4371 			       this_basic_block);
4372 	SET_INSN_DELETED (i1);
4373       }
4374 
4375     if (i0)
4376       {
4377 	LOG_LINKS (i0) = NULL;
4378 	REG_NOTES (i0) = 0;
4379 	if (MAY_HAVE_DEBUG_INSNS)
4380 	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4381 			       this_basic_block);
4382 	SET_INSN_DELETED (i0);
4383       }
4384 
4385     /* Get death notes for everything that is now used in either I3 or
4386        I2 and used to die in a previous insn.  If we built two new
4387        patterns, move from I1 to I2 then I2 to I3 so that we get the
4388        proper movement on registers that I2 modifies.  */
4389 
4390     if (i0)
4391       from_luid = DF_INSN_LUID (i0);
4392     else if (i1)
4393       from_luid = DF_INSN_LUID (i1);
4394     else
4395       from_luid = DF_INSN_LUID (i2);
4396     if (newi2pat)
4397       move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4398     move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4399 
4400     /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
4401     if (i3notes)
4402       distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4403 			elim_i2, elim_i1, elim_i0);
4404     if (i2notes)
4405       distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4406 			elim_i2, elim_i1, elim_i0);
4407     if (i1notes)
4408       distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4409 			elim_i2, local_elim_i1, local_elim_i0);
4410     if (i0notes)
4411       distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4412 			elim_i2, elim_i1, local_elim_i0);
4413     if (midnotes)
4414       distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4415 			elim_i2, elim_i1, elim_i0);
4416 
4417     /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
4418        know these are REG_UNUSED and want them to go to the desired insn,
4419        so we always pass it as i3.  */
4420 
4421     if (newi2pat && new_i2_notes)
4422       distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4423 			NULL_RTX);
4424 
4425     if (new_i3_notes)
4426       distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4427 			NULL_RTX);
4428 
4429     /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
4430        put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
4431        I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
4432        in that case, it might delete I2.  Similarly for I2 and I1.
4433        Show an additional death due to the REG_DEAD note we make here.  If
4434        we discard it in distribute_notes, we will decrement it again.  */
4435 
4436     if (i3dest_killed)
4437       {
4438 	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4439 	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4440 	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4441 			    elim_i1, elim_i0);
4442 	else
4443 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4444 			    elim_i2, elim_i1, elim_i0);
4445       }
4446 
4447     if (i2dest_in_i2src)
4448       {
4449 	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4450 	if (newi2pat && reg_set_p (i2dest, newi2pat))
4451 	  distribute_notes (new_note,  NULL, i2, NULL, NULL_RTX,
4452 			    NULL_RTX, NULL_RTX);
4453 	else
4454 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4455 			    NULL_RTX, NULL_RTX, NULL_RTX);
4456       }
4457 
4458     if (i1dest_in_i1src)
4459       {
4460 	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4461 	if (newi2pat && reg_set_p (i1dest, newi2pat))
4462 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4463 			    NULL_RTX, NULL_RTX);
4464 	else
4465 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4466 			    NULL_RTX, NULL_RTX, NULL_RTX);
4467       }
4468 
4469     if (i0dest_in_i0src)
4470       {
4471 	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4472 	if (newi2pat && reg_set_p (i0dest, newi2pat))
4473 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4474 			    NULL_RTX, NULL_RTX);
4475 	else
4476 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4477 			    NULL_RTX, NULL_RTX, NULL_RTX);
4478       }
4479 
4480     distribute_links (i3links);
4481     distribute_links (i2links);
4482     distribute_links (i1links);
4483     distribute_links (i0links);
4484 
4485     if (REG_P (i2dest))
4486       {
4487 	struct insn_link *link;
4488 	rtx_insn *i2_insn = 0;
4489 	rtx i2_val = 0, set;
4490 
4491 	/* The insn that used to set this register doesn't exist, and
4492 	   this life of the register may not exist either.  See if one of
4493 	   I3's links points to an insn that sets I2DEST.  If it does,
4494 	   that is now the last known value for I2DEST. If we don't update
4495 	   this and I2 set the register to a value that depended on its old
4496 	   contents, we will get confused.  If this insn is used, things
4497 	   will be set correctly in combine_instructions.  */
4498 	FOR_EACH_LOG_LINK (link, i3)
4499 	  if ((set = single_set (link->insn)) != 0
4500 	      && rtx_equal_p (i2dest, SET_DEST (set)))
4501 	    i2_insn = link->insn, i2_val = SET_SRC (set);
4502 
4503 	record_value_for_reg (i2dest, i2_insn, i2_val);
4504 
4505 	/* If the reg formerly set in I2 died only once and that was in I3,
4506 	   zero its use count so it won't make `reload' do any work.  */
4507 	if (! added_sets_2
4508 	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4509 	    && ! i2dest_in_i2src
4510 	    && REGNO (i2dest) < reg_n_sets_max)
4511 	  INC_REG_N_SETS (REGNO (i2dest), -1);
4512       }
4513 
4514     if (i1 && REG_P (i1dest))
4515       {
4516 	struct insn_link *link;
4517 	rtx_insn *i1_insn = 0;
4518 	rtx i1_val = 0, set;
4519 
4520 	FOR_EACH_LOG_LINK (link, i3)
4521 	  if ((set = single_set (link->insn)) != 0
4522 	      && rtx_equal_p (i1dest, SET_DEST (set)))
4523 	    i1_insn = link->insn, i1_val = SET_SRC (set);
4524 
4525 	record_value_for_reg (i1dest, i1_insn, i1_val);
4526 
4527 	if (! added_sets_1
4528 	    && ! i1dest_in_i1src
4529 	    && REGNO (i1dest) < reg_n_sets_max)
4530 	  INC_REG_N_SETS (REGNO (i1dest), -1);
4531       }
4532 
4533     if (i0 && REG_P (i0dest))
4534       {
4535 	struct insn_link *link;
4536 	rtx_insn *i0_insn = 0;
4537 	rtx i0_val = 0, set;
4538 
4539 	FOR_EACH_LOG_LINK (link, i3)
4540 	  if ((set = single_set (link->insn)) != 0
4541 	      && rtx_equal_p (i0dest, SET_DEST (set)))
4542 	    i0_insn = link->insn, i0_val = SET_SRC (set);
4543 
4544 	record_value_for_reg (i0dest, i0_insn, i0_val);
4545 
4546 	if (! added_sets_0
4547 	    && ! i0dest_in_i0src
4548 	    && REGNO (i0dest) < reg_n_sets_max)
4549 	  INC_REG_N_SETS (REGNO (i0dest), -1);
4550       }
4551 
4552     /* Update reg_stat[].nonzero_bits et al for any changes that may have
4553        been made to this insn.  The order is important, because newi2pat
4554        can affect nonzero_bits of newpat.  */
4555     if (newi2pat)
4556       note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4557     note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4558   }
4559 
4560   if (undobuf.other_insn != NULL_RTX)
4561     {
4562       if (dump_file)
4563 	{
4564 	  fprintf (dump_file, "modifying other_insn ");
4565 	  dump_insn_slim (dump_file, undobuf.other_insn);
4566 	}
4567       df_insn_rescan (undobuf.other_insn);
4568     }
4569 
4570   if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4571     {
4572       if (dump_file)
4573 	{
4574 	  fprintf (dump_file, "modifying insn i0 ");
4575 	  dump_insn_slim (dump_file, i0);
4576 	}
4577       df_insn_rescan (i0);
4578     }
4579 
4580   if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4581     {
4582       if (dump_file)
4583 	{
4584 	  fprintf (dump_file, "modifying insn i1 ");
4585 	  dump_insn_slim (dump_file, i1);
4586 	}
4587       df_insn_rescan (i1);
4588     }
4589 
4590   if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4591     {
4592       if (dump_file)
4593 	{
4594 	  fprintf (dump_file, "modifying insn i2 ");
4595 	  dump_insn_slim (dump_file, i2);
4596 	}
4597       df_insn_rescan (i2);
4598     }
4599 
4600   if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4601     {
4602       if (dump_file)
4603 	{
4604 	  fprintf (dump_file, "modifying insn i3 ");
4605 	  dump_insn_slim (dump_file, i3);
4606 	}
4607       df_insn_rescan (i3);
4608     }
4609 
4610   /* Set new_direct_jump_p if a new return or simple jump instruction
4611      has been created.  Adjust the CFG accordingly.  */
4612   if (returnjump_p (i3) || any_uncondjump_p (i3))
4613     {
4614       *new_direct_jump_p = 1;
4615       mark_jump_label (PATTERN (i3), i3, 0);
4616       update_cfg_for_uncondjump (i3);
4617     }
4618 
4619   if (undobuf.other_insn != NULL_RTX
4620       && (returnjump_p (undobuf.other_insn)
4621 	  || any_uncondjump_p (undobuf.other_insn)))
4622     {
4623       *new_direct_jump_p = 1;
4624       update_cfg_for_uncondjump (undobuf.other_insn);
4625     }
4626 
4627   /* A noop might also need cleaning up of CFG, if it comes from the
4628      simplification of a jump.  */
4629   if (JUMP_P (i3)
4630       && GET_CODE (newpat) == SET
4631       && SET_SRC (newpat) == pc_rtx
4632       && SET_DEST (newpat) == pc_rtx)
4633     {
4634       *new_direct_jump_p = 1;
4635       update_cfg_for_uncondjump (i3);
4636     }
4637 
4638   if (undobuf.other_insn != NULL_RTX
4639       && JUMP_P (undobuf.other_insn)
4640       && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4641       && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4642       && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4643     {
4644       *new_direct_jump_p = 1;
4645       update_cfg_for_uncondjump (undobuf.other_insn);
4646     }
4647 
4648   combine_successes++;
4649   undo_commit ();
4650 
4651   if (added_links_insn
4652       && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4653       && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4654     return added_links_insn;
4655   else
4656     return newi2pat ? i2 : i3;
4657 }
4658 
4659 /* Get a marker for undoing to the current state.  */
4660 
4661 static void *
4662 get_undo_marker (void)
4663 {
4664   return undobuf.undos;
4665 }
4666 
4667 /* Undo the modifications up to the marker.  */
4668 
4669 static void
4670 undo_to_marker (void *marker)
4671 {
4672   struct undo *undo, *next;
4673 
4674   for (undo = undobuf.undos; undo != marker; undo = next)
4675     {
4676       gcc_assert (undo);
4677 
4678       next = undo->next;
4679       switch (undo->kind)
4680 	{
4681 	case UNDO_RTX:
4682 	  *undo->where.r = undo->old_contents.r;
4683 	  break;
4684 	case UNDO_INT:
4685 	  *undo->where.i = undo->old_contents.i;
4686 	  break;
4687 	case UNDO_MODE:
4688 	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4689 	  break;
4690 	case UNDO_LINKS:
4691 	  *undo->where.l = undo->old_contents.l;
4692 	  break;
4693 	default:
4694 	  gcc_unreachable ();
4695 	}
4696 
4697       undo->next = undobuf.frees;
4698       undobuf.frees = undo;
4699     }
4700 
4701   undobuf.undos = (struct undo *) marker;
4702 }
4703 
4704 /* Undo all the modifications recorded in undobuf.  */
4705 
4706 static void
4707 undo_all (void)
4708 {
4709   undo_to_marker (0);
4710 }
4711 
4712 /* We've committed to accepting the changes we made.  Move all
4713    of the undos to the free list.  */
4714 
4715 static void
4716 undo_commit (void)
4717 {
4718   struct undo *undo, *next;
4719 
4720   for (undo = undobuf.undos; undo; undo = next)
4721     {
4722       next = undo->next;
4723       undo->next = undobuf.frees;
4724       undobuf.frees = undo;
4725     }
4726   undobuf.undos = 0;
4727 }
4728 
4729 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4730    where we have an arithmetic expression and return that point.  LOC will
4731    be inside INSN.
4732 
4733    try_combine will call this function to see if an insn can be split into
4734    two insns.  */
4735 
4736 static rtx *
4737 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4738 {
4739   rtx x = *loc;
4740   enum rtx_code code = GET_CODE (x);
4741   rtx *split;
4742   unsigned HOST_WIDE_INT len = 0;
4743   HOST_WIDE_INT pos = 0;
4744   int unsignedp = 0;
4745   rtx inner = NULL_RTX;
4746 
4747   /* First special-case some codes.  */
4748   switch (code)
4749     {
4750     case SUBREG:
4751 #ifdef INSN_SCHEDULING
4752       /* If we are making a paradoxical SUBREG invalid, it becomes a split
4753 	 point.  */
4754       if (MEM_P (SUBREG_REG (x)))
4755 	return loc;
4756 #endif
4757       return find_split_point (&SUBREG_REG (x), insn, false);
4758 
4759     case MEM:
4760       /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4761 	 using LO_SUM and HIGH.  */
4762       if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4763 			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4764 	{
4765 	  machine_mode address_mode = get_address_mode (x);
4766 
4767 	  SUBST (XEXP (x, 0),
4768 		 gen_rtx_LO_SUM (address_mode,
4769 				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4770 				 XEXP (x, 0)));
4771 	  return &XEXP (XEXP (x, 0), 0);
4772 	}
4773 
4774       /* If we have a PLUS whose second operand is a constant and the
4775 	 address is not valid, perhaps we can split it up using
4776 	 the machine-specific way to split large constants.  We use
4777 	 the first pseudo-reg (one of the virtual regs) as a placeholder;
4778 	 it will not remain in the result.  */
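      /* Illustrative sketch (hypothetical target, not taken from this file):
	 for (mem (plus (reg A) (const_int 0x12345678))) whose offset is too
	 large to form a valid address, the backend splitter may first add
	 the high part of the constant to A and then add the low part; the
	 code below stitches the two sources back together and returns a
	 split point inside the rebuilt address.  */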
4779       if (GET_CODE (XEXP (x, 0)) == PLUS
4780 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4781 	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4782 					    MEM_ADDR_SPACE (x)))
4783 	{
4784 	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4785 	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4786 					       subst_insn);
4787 
4788 	  /* This should have produced two insns, each of which sets our
4789 	     placeholder.  If the source of the second is a valid address,
4790 	     we can put both sources together and make a split point
4791 	     in the middle.  */
4792 
4793 	  if (seq
4794 	      && NEXT_INSN (seq) != NULL_RTX
4795 	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4796 	      && NONJUMP_INSN_P (seq)
4797 	      && GET_CODE (PATTERN (seq)) == SET
4798 	      && SET_DEST (PATTERN (seq)) == reg
4799 	      && ! reg_mentioned_p (reg,
4800 				    SET_SRC (PATTERN (seq)))
4801 	      && NONJUMP_INSN_P (NEXT_INSN (seq))
4802 	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4803 	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4804 	      && memory_address_addr_space_p
4805 		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4806 		    MEM_ADDR_SPACE (x)))
4807 	    {
4808 	      rtx src1 = SET_SRC (PATTERN (seq));
4809 	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4810 
4811 	      /* Replace the placeholder in SRC2 with SRC1.  If we can
4812 		 find where in SRC2 it was placed, that can become our
4813 		 split point and we can replace this address with SRC2.
4814 		 Just try two obvious places.  */
4815 
4816 	      src2 = replace_rtx (src2, reg, src1);
4817 	      split = 0;
4818 	      if (XEXP (src2, 0) == src1)
4819 		split = &XEXP (src2, 0);
4820 	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4821 		       && XEXP (XEXP (src2, 0), 0) == src1)
4822 		split = &XEXP (XEXP (src2, 0), 0);
4823 
4824 	      if (split)
4825 		{
4826 		  SUBST (XEXP (x, 0), src2);
4827 		  return split;
4828 		}
4829 	    }
4830 
4831 	  /* If that didn't work, perhaps the first operand is complex and
4832 	     needs to be computed separately, so make a split point there.
4833 	     This will occur on machines that just support REG + CONST
4834 	     and have a constant moved through some previous computation.  */
4835 
4836 	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4837 		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4838 			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4839 	    return &XEXP (XEXP (x, 0), 0);
4840 	}
4841 
4842       /* If we have a PLUS whose first operand is complex, try computing it
4843          separately by making a split there.  */
4844       if (GET_CODE (XEXP (x, 0)) == PLUS
4845           && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4846 					    MEM_ADDR_SPACE (x))
4847           && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4848           && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4849                 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4850         return &XEXP (XEXP (x, 0), 0);
4851       break;
4852 
4853     case SET:
4854       /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4855 	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4856 	 we need to put the operand into a register.  So split at that
4857 	 point.  */
4858 
4859       if (SET_DEST (x) == cc0_rtx
4860 	  && GET_CODE (SET_SRC (x)) != COMPARE
4861 	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4862 	  && !OBJECT_P (SET_SRC (x))
4863 	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
4864 		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4865 	return &SET_SRC (x);
4866 
4867       /* See if we can split SET_SRC as it stands.  */
4868       split = find_split_point (&SET_SRC (x), insn, true);
4869       if (split && split != &SET_SRC (x))
4870 	return split;
4871 
4872       /* See if we can split SET_DEST as it stands.  */
4873       split = find_split_point (&SET_DEST (x), insn, false);
4874       if (split && split != &SET_DEST (x))
4875 	return split;
4876 
4877       /* See if this is a bitfield assignment with everything constant.  If
4878 	 so, this is an IOR of an AND, so split it into that.  */
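      /* Illustrative example (assuming !BITS_BIG_ENDIAN): storing the
	 constant 5 into a 3-bit field at bit 2 of register R, i.e.
	 (set (zero_extract R 3 2) (const_int 5)), becomes
	 (set R (ior (and R (const_int -29)) (const_int 20))),
	 since ~(7 << 2) is -29 and 5 << 2 is 20.  */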
4879       if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4880 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4881 	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
4882 	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
4883 	  && CONST_INT_P (SET_SRC (x))
4884 	  && ((INTVAL (XEXP (SET_DEST (x), 1))
4885 	       + INTVAL (XEXP (SET_DEST (x), 2)))
4886 	      <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4887 	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4888 	{
4889 	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4890 	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4891 	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4892 	  rtx dest = XEXP (SET_DEST (x), 0);
4893 	  machine_mode mode = GET_MODE (dest);
4894 	  unsigned HOST_WIDE_INT mask
4895 	    = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4896 	  rtx or_mask;
4897 
4898 	  if (BITS_BIG_ENDIAN)
4899 	    pos = GET_MODE_PRECISION (mode) - len - pos;
4900 
4901 	  or_mask = gen_int_mode (src << pos, mode);
4902 	  if (src == mask)
4903 	    SUBST (SET_SRC (x),
4904 		   simplify_gen_binary (IOR, mode, dest, or_mask));
4905 	  else
4906 	    {
4907 	      rtx negmask = gen_int_mode (~(mask << pos), mode);
4908 	      SUBST (SET_SRC (x),
4909 		     simplify_gen_binary (IOR, mode,
4910 					  simplify_gen_binary (AND, mode,
4911 							       dest, negmask),
4912 					  or_mask));
4913 	    }
4914 
4915 	  SUBST (SET_DEST (x), dest);
4916 
4917 	  split = find_split_point (&SET_SRC (x), insn, true);
4918 	  if (split && split != &SET_SRC (x))
4919 	    return split;
4920 	}
4921 
4922       /* Otherwise, see if this is an operation that we can split into two.
4923 	 If so, try to split that.  */
4924       code = GET_CODE (SET_SRC (x));
4925 
4926       switch (code)
4927 	{
4928 	case AND:
4929 	  /* If we are AND'ing with a large constant that is only a single
4930 	     bit and the result is only being used in a context where we
4931 	     need to know if it is zero or nonzero, replace it with a bit
4932 	     extraction.  This will avoid the large constant, which might
4933 	     have taken more than one insn to make.  If the constant were
4934 	     not a valid argument to the AND but took only one insn to make,
4935 	     this is no worse, but if it took more than one insn, it will
4936 	     be better.  */
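	  /* Illustrative example (hypothetical insns): if register D is set
	     to (and (reg X) (const_int 4096)) and its only use is in
	     (ne D (const_int 0)), the AND is replaced by a one-bit
	     extraction such as (zero_extract X 1 12), avoiding a separate
	     load of the large constant.  */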
4937 
4938 	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4939 	      && REG_P (XEXP (SET_SRC (x), 0))
4940 	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4941 	      && REG_P (SET_DEST (x))
4942 	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
4943 	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4944 	      && XEXP (*split, 0) == SET_DEST (x)
4945 	      && XEXP (*split, 1) == const0_rtx)
4946 	    {
4947 	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4948 						XEXP (SET_SRC (x), 0),
4949 						pos, NULL_RTX, 1, 1, 0, 0);
4950 	      if (extraction != 0)
4951 		{
4952 		  SUBST (SET_SRC (x), extraction);
4953 		  return find_split_point (loc, insn, false);
4954 		}
4955 	    }
4956 	  break;
4957 
4958 	case NE:
4959 	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4960 	     is known to be on, this can be converted into a NEG of a shift.  */
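	  /* Illustrative example: if nonzero_bits shows that only bit 3 of X
	     can be set, (ne X (const_int 0)) becomes
	     (neg (lshiftrt X (const_int 3))), which evaluates directly to
	     either 0 or -1, matching STORE_FLAG_VALUE == -1.  */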
4961 	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4962 	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4963 	      && 1 <= (pos = exact_log2
4964 		       (nonzero_bits (XEXP (SET_SRC (x), 0),
4965 				      GET_MODE (XEXP (SET_SRC (x), 0))))))
4966 	    {
4967 	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4968 
4969 	      SUBST (SET_SRC (x),
4970 		     gen_rtx_NEG (mode,
4971 				  gen_rtx_LSHIFTRT (mode,
4972 						    XEXP (SET_SRC (x), 0),
4973 						    GEN_INT (pos))));
4974 
4975 	      split = find_split_point (&SET_SRC (x), insn, true);
4976 	      if (split && split != &SET_SRC (x))
4977 		return split;
4978 	    }
4979 	  break;
4980 
4981 	case SIGN_EXTEND:
4982 	  inner = XEXP (SET_SRC (x), 0);
4983 
4984 	  /* We can't optimize if either mode is a partial integer
4985 	     mode as we don't know how many bits are significant
4986 	     in those modes.  */
4987 	  if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4988 	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4989 	    break;
4990 
4991 	  pos = 0;
4992 	  len = GET_MODE_PRECISION (GET_MODE (inner));
4993 	  unsignedp = 0;
4994 	  break;
4995 
4996 	case SIGN_EXTRACT:
4997 	case ZERO_EXTRACT:
4998 	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4999 	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5000 	    {
5001 	      inner = XEXP (SET_SRC (x), 0);
5002 	      len = INTVAL (XEXP (SET_SRC (x), 1));
5003 	      pos = INTVAL (XEXP (SET_SRC (x), 2));
5004 
5005 	      if (BITS_BIG_ENDIAN)
5006 		pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
5007 	      unsignedp = (code == ZERO_EXTRACT);
5008 	    }
5009 	  break;
5010 
5011 	default:
5012 	  break;
5013 	}
5014 
5015       if (len && pos >= 0
5016 	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
5017 	{
5018 	  machine_mode mode = GET_MODE (SET_SRC (x));
5019 
5020 	  /* For unsigned, we have a choice of a shift followed by an
5021 	     AND or two shifts.  Use two shifts for field sizes where the
5022 	     constant might be too large.  We assume here that we can
5023 	     always at least get 8-bit constants in an AND insn, which is
5024 	     true for every current RISC.  */
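	  /* Illustrative example (32-bit mode assumed): an unsigned 4-bit
	     field at bit 5 becomes (and (lshiftrt R 5) 15), while a signed
	     4-bit field at the same position takes the two-shift form
	     (ashiftrt (ashift R 23) 28).  */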
5025 
5026 	  if (unsignedp && len <= 8)
5027 	    {
5028 	      unsigned HOST_WIDE_INT mask
5029 		= ((unsigned HOST_WIDE_INT) 1 << len) - 1;
5030 	      SUBST (SET_SRC (x),
5031 		     gen_rtx_AND (mode,
5032 				  gen_rtx_LSHIFTRT
5033 				  (mode, gen_lowpart (mode, inner),
5034 				   GEN_INT (pos)),
5035 				  gen_int_mode (mask, mode)));
5036 
5037 	      split = find_split_point (&SET_SRC (x), insn, true);
5038 	      if (split && split != &SET_SRC (x))
5039 		return split;
5040 	    }
5041 	  else
5042 	    {
5043 	      SUBST (SET_SRC (x),
5044 		     gen_rtx_fmt_ee
5045 		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5046 		      gen_rtx_ASHIFT (mode,
5047 				      gen_lowpart (mode, inner),
5048 				      GEN_INT (GET_MODE_PRECISION (mode)
5049 					       - len - pos)),
5050 		      GEN_INT (GET_MODE_PRECISION (mode) - len)));
5051 
5052 	      split = find_split_point (&SET_SRC (x), insn, true);
5053 	      if (split && split != &SET_SRC (x))
5054 		return split;
5055 	    }
5056 	}
5057 
5058       /* See if this is a simple operation with a constant as the second
5059 	 operand.  It might be that this constant is out of range and hence
5060 	 could be used as a split point.  */
5061       if (BINARY_P (SET_SRC (x))
5062 	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
5063 	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
5064 	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5065 		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5066 	return &XEXP (SET_SRC (x), 1);
5067 
5068       /* Finally, see if this is a simple operation with its first operand
5069 	 not in a register.  The operation might require this operand in a
5070 	 register, so return it as a split point.  We can always do this
5071 	 because if the first operand were another operation, we would have
5072 	 already found it as a split point.  */
5073       if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5074 	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5075 	return &XEXP (SET_SRC (x), 0);
5076 
5077       return 0;
5078 
5079     case AND:
5080     case IOR:
5081       /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5082 	 it is better to write this as (not (ior A B)) so we can split it.
5083 	 Similarly for IOR.  */
5084       if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5085 	{
5086 	  SUBST (*loc,
5087 		 gen_rtx_NOT (GET_MODE (x),
5088 			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5089 					      GET_MODE (x),
5090 					      XEXP (XEXP (x, 0), 0),
5091 					      XEXP (XEXP (x, 1), 0))));
5092 	  return find_split_point (loc, insn, set_src);
5093 	}
5094 
5095       /* Many RISC machines have a large set of logical insns.  If the
5096 	 second operand is a NOT, put it first so we will try to split the
5097 	 other operand first.  */
5098       if (GET_CODE (XEXP (x, 1)) == NOT)
5099 	{
5100 	  rtx tem = XEXP (x, 0);
5101 	  SUBST (XEXP (x, 0), XEXP (x, 1));
5102 	  SUBST (XEXP (x, 1), tem);
5103 	}
5104       break;
5105 
5106     case PLUS:
5107     case MINUS:
5108       /* Canonicalization can produce (minus A (mult B C)), where C is a
5109 	 constant.  It may be better to try splitting (plus (mult B -C) A)
5110 	 instead if this isn't a multiply by a power of two.  */
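      /* Illustrative example: (minus A (mult B (const_int 3))) is rewritten
	 as (plus (mult B (const_int -3)) A) before retrying the search for
	 a split point, since 3 is not a power of two.  */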
5111       if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5112 	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5113 	  && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
5114 	{
5115 	  machine_mode mode = GET_MODE (x);
5116 	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5117 	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5118 	  SUBST (*loc, gen_rtx_PLUS (mode,
5119 				     gen_rtx_MULT (mode,
5120 						   XEXP (XEXP (x, 1), 0),
5121 						   gen_int_mode (other_int,
5122 								 mode)),
5123 				     XEXP (x, 0)));
5124 	  return find_split_point (loc, insn, set_src);
5125 	}
5126 
5127       /* Split at a multiply-accumulate instruction.  However if this is
5128          the SET_SRC, we likely do not have such an instruction and it's
5129          worthless to try this split.  */
5130       if (!set_src
5131 	  && (GET_CODE (XEXP (x, 0)) == MULT
5132 	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
5133 		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5134         return loc;
5135 
5136     default:
5137       break;
5138     }
5139 
5140   /* Otherwise, select our actions depending on our rtx class.  */
5141   switch (GET_RTX_CLASS (code))
5142     {
5143     case RTX_BITFIELD_OPS:		/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
5144     case RTX_TERNARY:
5145       split = find_split_point (&XEXP (x, 2), insn, false);
5146       if (split)
5147 	return split;
5148       /* ... fall through ...  */
5149     case RTX_BIN_ARITH:
5150     case RTX_COMM_ARITH:
5151     case RTX_COMPARE:
5152     case RTX_COMM_COMPARE:
5153       split = find_split_point (&XEXP (x, 1), insn, false);
5154       if (split)
5155 	return split;
5156       /* ... fall through ...  */
5157     case RTX_UNARY:
5158       /* Some machines have (and (shift ...) ...) insns.  If X is not
5159 	 an AND, but XEXP (X, 0) is, use it as our split point.  */
5160       if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5161 	return &XEXP (x, 0);
5162 
5163       split = find_split_point (&XEXP (x, 0), insn, false);
5164       if (split)
5165 	return split;
5166       return loc;
5167 
5168     default:
5169       /* Otherwise, we don't have a split point.  */
5170       return 0;
5171     }
5172 }
5173 
5174 /* Throughout X, replace FROM with TO, and return the result.
5175    The result is TO if X is FROM;
5176    otherwise the result is X, but its contents may have been modified.
5177    If they were modified, a record was made in undobuf so that
5178    undo_all will (among other things) return X to its original state.
5179 
5180    If the number of changes necessary is too much to record to undo,
5181    the excess changes are not made, so the result is invalid.
5182    The changes already made can still be undone.
5183    undobuf.num_undo is incremented for such changes, so by testing that,
5184    the caller can tell whether the result is valid.
5185 
5186    `n_occurrences' is incremented each time FROM is replaced.
5187 
5188    IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5189 
5190    IN_COND is nonzero if we are at the top level of a condition.
5191 
5192    UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
5193    by copying if `n_occurrences' is nonzero.  */
5194 
5195 static rtx
5196 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5197 {
5198   enum rtx_code code = GET_CODE (x);
5199   machine_mode op0_mode = VOIDmode;
5200   const char *fmt;
5201   int len, i;
5202   rtx new_rtx;
5203 
5204 /* Two expressions are equal if they are identical copies of a shared
5205    RTX or if they are both registers with the same register number
5206    and mode.  */
5207 
5208 #define COMBINE_RTX_EQUAL_P(X,Y)			\
5209   ((X) == (Y)						\
5210    || (REG_P (X) && REG_P (Y)	\
5211        && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5212 
5213   /* Do not substitute into clobbers of regs -- this will never result in
5214      valid RTL.  */
5215   if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5216     return x;
5217 
5218   if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5219     {
5220       n_occurrences++;
5221       return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5222     }
5223 
5224   /* If X and FROM are the same register but different modes, they
5225      will not have been seen as equal above.  However, the log links code
5226      will make a LOG_LINKS entry for that case.  If we do nothing, we
5227      will try to rerecognize our original insn and, when it succeeds,
5228      we will delete the feeding insn, which is incorrect.
5229 
5230      So force this insn not to match in this (rare) case.  */
5231   if (! in_dest && code == REG && REG_P (from)
5232       && reg_overlap_mentioned_p (x, from))
5233     return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5234 
5235   /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5236      of which may contain things that can be combined.  */
5237   if (code != MEM && code != LO_SUM && OBJECT_P (x))
5238     return x;
5239 
5240   /* It is possible to have a subexpression appear twice in the insn.
5241      Suppose that FROM is a register that appears within TO.
5242      Then, after that subexpression has been scanned once by `subst',
5243      the second time it is scanned, TO may be found.  If we were
5244      to scan TO here, we would find FROM within it and create a
5245      self-referent rtl structure which is completely wrong.  */
5246   if (COMBINE_RTX_EQUAL_P (x, to))
5247     return to;
5248 
5249   /* Parallel asm_operands need special attention because all of the
5250      inputs are shared across the arms.  Furthermore, unsharing the
5251      rtl results in recognition failures.  Failure to handle this case
5252      specially can result in circular rtl.
5253 
5254      Solve this by doing a normal pass across the first entry of the
5255      parallel, and only processing the SET_DESTs of the subsequent
5256      entries.  Ug.  */
5257 
5258   if (code == PARALLEL
5259       && GET_CODE (XVECEXP (x, 0, 0)) == SET
5260       && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5261     {
5262       new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5263 
5264       /* If this substitution failed, this whole thing fails.  */
5265       if (GET_CODE (new_rtx) == CLOBBER
5266 	  && XEXP (new_rtx, 0) == const0_rtx)
5267 	return new_rtx;
5268 
5269       SUBST (XVECEXP (x, 0, 0), new_rtx);
5270 
5271       for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5272 	{
5273 	  rtx dest = SET_DEST (XVECEXP (x, 0, i));
5274 
5275 	  if (!REG_P (dest)
5276 	      && GET_CODE (dest) != CC0
5277 	      && GET_CODE (dest) != PC)
5278 	    {
5279 	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5280 
5281 	      /* If this substitution failed, this whole thing fails.  */
5282 	      if (GET_CODE (new_rtx) == CLOBBER
5283 		  && XEXP (new_rtx, 0) == const0_rtx)
5284 		return new_rtx;
5285 
5286 	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5287 	    }
5288 	}
5289     }
5290   else
5291     {
5292       len = GET_RTX_LENGTH (code);
5293       fmt = GET_RTX_FORMAT (code);
5294 
5295       /* We don't need to process a SET_DEST that is a register, CC0,
5296 	 or PC, so set up to skip this common case.  All other cases
5297 	 where we want to suppress replacing something inside a
5298 	 SET_SRC are handled via the IN_DEST operand.  */
5299       if (code == SET
5300 	  && (REG_P (SET_DEST (x))
5301 	      || GET_CODE (SET_DEST (x)) == CC0
5302 	      || GET_CODE (SET_DEST (x)) == PC))
5303 	fmt = "ie";
5304 
5305       /* Trying to simplify the operands of a widening MULT is not likely
5306 	 to create RTL matching a machine insn.  */
5307       if (code == MULT
5308 	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5309 	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5310 	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5311 	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5312 	  && REG_P (XEXP (XEXP (x, 0), 0))
5313 	  && REG_P (XEXP (XEXP (x, 1), 0))
5314 	  && from == to)
5315 	return x;
5316 
5317 
5318       /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5319 	 constant.  */
5320       if (fmt[0] == 'e')
5321 	op0_mode = GET_MODE (XEXP (x, 0));
5322 
5323       for (i = 0; i < len; i++)
5324 	{
5325 	  if (fmt[i] == 'E')
5326 	    {
5327 	      int j;
5328 	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5329 		{
5330 		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5331 		    {
5332 		      new_rtx = (unique_copy && n_occurrences
5333 			     ? copy_rtx (to) : to);
5334 		      n_occurrences++;
5335 		    }
5336 		  else
5337 		    {
5338 		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5339 				       unique_copy);
5340 
5341 		      /* If this substitution failed, this whole thing
5342 			 fails.  */
5343 		      if (GET_CODE (new_rtx) == CLOBBER
5344 			  && XEXP (new_rtx, 0) == const0_rtx)
5345 			return new_rtx;
5346 		    }
5347 
5348 		  SUBST (XVECEXP (x, i, j), new_rtx);
5349 		}
5350 	    }
5351 	  else if (fmt[i] == 'e')
5352 	    {
5353 	      /* If this is a register being set, ignore it.  */
5354 	      new_rtx = XEXP (x, i);
5355 	      if (in_dest
5356 		  && i == 0
5357 		  && (((code == SUBREG || code == ZERO_EXTRACT)
5358 		       && REG_P (new_rtx))
5359 		      || code == STRICT_LOW_PART))
5360 		;
5361 
5362 	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5363 		{
5364 		  /* In general, don't install a subreg involving two
5365 		     modes not tieable.  It can worsen register
5366 		     allocation, and can even make invalid reload
5367 		     insns, since the reg inside may need to be copied
5368 		     from in the outside mode, and that may be invalid
5369 		     if it is an fp reg copied in integer mode.
5370 
5371 		     We allow two exceptions to this: It is valid if
5372 		     it is inside another SUBREG and the mode of that
5373 		     SUBREG and the mode of the inside of TO is
5374 		     tieable and it is valid if X is a SET that copies
5375 		     FROM to CC0.  */
5376 
5377 		  if (GET_CODE (to) == SUBREG
5378 		      && ! MODES_TIEABLE_P (GET_MODE (to),
5379 					    GET_MODE (SUBREG_REG (to)))
5380 		      && ! (code == SUBREG
5381 			    && MODES_TIEABLE_P (GET_MODE (x),
5382 						GET_MODE (SUBREG_REG (to))))
5383 		      && (!HAVE_cc0
5384 			  || (! (code == SET
5385 				 && i == 1
5386 				 && XEXP (x, 0) == cc0_rtx))))
5387 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5388 
5389 		  if (code == SUBREG
5390 		      && REG_P (to)
5391 		      && REGNO (to) < FIRST_PSEUDO_REGISTER
5392 		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5393 						SUBREG_BYTE (x),
5394 						GET_MODE (x)) < 0)
5395 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5396 
5397 		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5398 		  n_occurrences++;
5399 		}
5400 	      else
5401 		/* If we are in a SET_DEST, suppress most cases unless we
5402 		   have gone inside a MEM, in which case we want to
5403 		   simplify the address.  We assume here that things that
5404 		   are actually part of the destination have their inner
5405 		   parts in the first expression.  This is true for SUBREG,
5406 		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5407 		   things aside from REG and MEM that should appear in a
5408 		   SET_DEST.  */
5409 		new_rtx = subst (XEXP (x, i), from, to,
5410 			     (((in_dest
5411 				&& (code == SUBREG || code == STRICT_LOW_PART
5412 				    || code == ZERO_EXTRACT))
5413 			       || code == SET)
5414 			      && i == 0),
5415 				 code == IF_THEN_ELSE && i == 0,
5416 				 unique_copy);
5417 
5418 	      /* If we found that we will have to reject this combination,
5419 		 indicate that by returning the CLOBBER ourselves, rather than
5420 		 an expression containing it.  This will speed things up as
5421 		 well as prevent accidents where two CLOBBERs are considered
5422 		 to be equal, thus producing an incorrect simplification.  */
5423 
5424 	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5425 		return new_rtx;
5426 
5427 	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5428 		{
5429 		  machine_mode mode = GET_MODE (x);
5430 
5431 		  x = simplify_subreg (GET_MODE (x), new_rtx,
5432 				       GET_MODE (SUBREG_REG (x)),
5433 				       SUBREG_BYTE (x));
5434 		  if (! x)
5435 		    x = gen_rtx_CLOBBER (mode, const0_rtx);
5436 		}
5437 	      else if (CONST_SCALAR_INT_P (new_rtx)
5438 		       && (GET_CODE (x) == ZERO_EXTEND
5439 			   || GET_CODE (x) == FLOAT
5440 			   || GET_CODE (x) == UNSIGNED_FLOAT))
5441 		{
5442 		  x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5443 						new_rtx,
5444 						GET_MODE (XEXP (x, 0)));
5445 		  if (!x)
5446 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5447 		}
5448 	      else
5449 		SUBST (XEXP (x, i), new_rtx);
5450 	    }
5451 	}
5452     }
5453 
5454   /* Check if we are loading something from the constant pool via float
5455      extension; in this case we would undo compress_float_constant
5456      optimization and degenerate constant load to an immediate value.  */
5457   if (GET_CODE (x) == FLOAT_EXTEND
5458       && MEM_P (XEXP (x, 0))
5459       && MEM_READONLY_P (XEXP (x, 0)))
5460     {
5461       rtx tmp = avoid_constant_pool_reference (x);
5462       if (x != tmp)
5463         return x;
5464     }
5465 
5466   /* Try to simplify X.  If the simplification changed the code, it is likely
5467      that further simplification will help, so loop, but limit the number
5468      of repetitions that will be performed.  */
5469 
5470   for (i = 0; i < 4; i++)
5471     {
5472       /* If X is sufficiently simple, don't bother trying to do anything
5473 	 with it.  */
5474       if (code != CONST_INT && code != REG && code != CLOBBER)
5475 	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5476 
5477       if (GET_CODE (x) == code)
5478 	break;
5479 
5480       code = GET_CODE (x);
5481 
5482       /* We no longer know the original mode of operand 0 since we
5483 	 have changed the form of X.  */
5484       op0_mode = VOIDmode;
5485     }
5486 
5487   return x;
5488 }
5489 
5490 /* Simplify X, a piece of RTL.  We just operate on the expression at the
5491    outer level; call `subst' to simplify recursively.  Return the new
5492    expression.
5493 
5494    OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
5495    if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
5496    of a condition.  */
5497 
5498 static rtx
5499 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5500 		      int in_cond)
5501 {
5502   enum rtx_code code = GET_CODE (x);
5503   machine_mode mode = GET_MODE (x);
5504   rtx temp;
5505   int i;
5506 
5507   /* If this is a commutative operation, put a constant last and a complex
5508      expression first.  We don't need to do this for comparisons here.  */
5509   if (COMMUTATIVE_ARITH_P (x)
5510       && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5511     {
5512       temp = XEXP (x, 0);
5513       SUBST (XEXP (x, 0), XEXP (x, 1));
5514       SUBST (XEXP (x, 1), temp);
5515     }
5516 
5517   /* Try to fold this expression in case we have constants that weren't
5518      present before.  */
5519   temp = 0;
5520   switch (GET_RTX_CLASS (code))
5521     {
5522     case RTX_UNARY:
5523       if (op0_mode == VOIDmode)
5524 	op0_mode = GET_MODE (XEXP (x, 0));
5525       temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5526       break;
5527     case RTX_COMPARE:
5528     case RTX_COMM_COMPARE:
5529       {
5530 	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5531 	if (cmp_mode == VOIDmode)
5532 	  {
5533 	    cmp_mode = GET_MODE (XEXP (x, 1));
5534 	    if (cmp_mode == VOIDmode)
5535 	      cmp_mode = op0_mode;
5536 	  }
5537 	temp = simplify_relational_operation (code, mode, cmp_mode,
5538 					      XEXP (x, 0), XEXP (x, 1));
5539       }
5540       break;
5541     case RTX_COMM_ARITH:
5542     case RTX_BIN_ARITH:
5543       temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5544       break;
5545     case RTX_BITFIELD_OPS:
5546     case RTX_TERNARY:
5547       temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5548 					 XEXP (x, 1), XEXP (x, 2));
5549       break;
5550     default:
5551       break;
5552     }
5553 
5554   if (temp)
5555     {
5556       x = temp;
5557       code = GET_CODE (temp);
5558       op0_mode = VOIDmode;
5559       mode = GET_MODE (temp);
5560     }
5561 
5562   /* If this is a simple operation applied to an IF_THEN_ELSE, try
5563      applying it to the arms of the IF_THEN_ELSE.  This often simplifies
5564      things.  Check for cases where both arms are testing the same
5565      condition.
5566 
5567      Don't do anything if all operands are very simple.  */
5568 
5569   if ((BINARY_P (x)
5570        && ((!OBJECT_P (XEXP (x, 0))
5571 	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5572 		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5573 	   || (!OBJECT_P (XEXP (x, 1))
5574 	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5575 		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5576       || (UNARY_P (x)
5577 	  && (!OBJECT_P (XEXP (x, 0))
5578 	       && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5579 		     && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5580     {
5581       rtx cond, true_rtx, false_rtx;
5582 
5583       cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5584       if (cond != 0
5585 	  /* If everything is a comparison, what we have is highly unlikely
5586 	     to be simpler, so don't use it.  */
5587 	  && ! (COMPARISON_P (x)
5588 		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5589 	  /* Similarly, if we end up with one of the expressions the same
5590 	     as the original, it is certainly not simpler.  */
5591 	  && ! rtx_equal_p (x, true_rtx)
5592 	  && ! rtx_equal_p (x, false_rtx))
5593 	{
5594 	  rtx cop1 = const0_rtx;
5595 	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5596 
5597 	  if (cond_code == NE && COMPARISON_P (cond))
5598 	    return x;
5599 
5600 	  /* Simplify the alternative arms; this may collapse the true and
5601 	     false arms to store-flag values.  Be careful to use copy_rtx
5602 	     here since true_rtx or false_rtx might share RTL with x as a
5603 	     result of the if_then_else_cond call above.  */
5604 	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5605 	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5606 
5607 	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
5608 	     is unlikely to be simpler.  */
5609 	  if (general_operand (true_rtx, VOIDmode)
5610 	      && general_operand (false_rtx, VOIDmode))
5611 	    {
5612 	      enum rtx_code reversed;
5613 
5614 	      /* Restarting if we generate a store-flag expression will cause
5615 		 us to loop.  Just drop through in this case.  */
5616 
5617 	      /* If the result values are STORE_FLAG_VALUE and zero, we can
5618 		 just make the comparison operation.  */
5619 	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5620 		x = simplify_gen_relational (cond_code, mode, VOIDmode,
5621 					     cond, cop1);
5622 	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5623 		       && ((reversed = reversed_comparison_code_parts
5624 					(cond_code, cond, cop1, NULL))
5625 			   != UNKNOWN))
5626 		x = simplify_gen_relational (reversed, mode, VOIDmode,
5627 					     cond, cop1);
5628 
5629 	      /* Likewise, we can make the negate of a comparison operation
5630 		 if the result values are - STORE_FLAG_VALUE and zero.  */
5631 	      else if (CONST_INT_P (true_rtx)
5632 		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5633 		       && false_rtx == const0_rtx)
5634 		x = simplify_gen_unary (NEG, mode,
5635 					simplify_gen_relational (cond_code,
5636 								 mode, VOIDmode,
5637 								 cond, cop1),
5638 					mode);
5639 	      else if (CONST_INT_P (false_rtx)
5640 		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5641 		       && true_rtx == const0_rtx
5642 		       && ((reversed = reversed_comparison_code_parts
5643 					(cond_code, cond, cop1, NULL))
5644 			   != UNKNOWN))
5645 		x = simplify_gen_unary (NEG, mode,
5646 					simplify_gen_relational (reversed,
5647 								 mode, VOIDmode,
5648 								 cond, cop1),
5649 					mode);
5650 	      else
5651 		return gen_rtx_IF_THEN_ELSE (mode,
5652 					     simplify_gen_relational (cond_code,
5653 								      mode,
5654 								      VOIDmode,
5655 								      cond,
5656 								      cop1),
5657 					     true_rtx, false_rtx);
5658 
5659 	      code = GET_CODE (x);
5660 	      op0_mode = VOIDmode;
5661 	    }
5662 	}
5663     }
5664 
5665   /* First see if we can apply the inverse distributive law.  */
5666   if (code == PLUS || code == MINUS
5667       || code == AND || code == IOR || code == XOR)
5668     {
5669       x = apply_distributive_law (x);
5670       code = GET_CODE (x);
5671       op0_mode = VOIDmode;
5672     }
5673 
5674   /* If CODE is an associative operation not otherwise handled, see if we
5675      can associate some operands.  This can win if they are constants or
5676      if they are logically related (i.e. (a & b) & a).  */
5677   if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5678        || code == AND || code == IOR || code == XOR
5679        || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5680       && ((INTEGRAL_MODE_P (mode) && code != DIV)
5681 	  || (flag_associative_math && FLOAT_MODE_P (mode))))
5682     {
5683       if (GET_CODE (XEXP (x, 0)) == code)
5684 	{
5685 	  rtx other = XEXP (XEXP (x, 0), 0);
5686 	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5687 	  rtx inner_op1 = XEXP (x, 1);
5688 	  rtx inner;
5689 
5690 	  /* Make sure we pass the constant operand if any as the second
5691 	     one if this is a commutative operation.  */
5692 	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5693 	    std::swap (inner_op0, inner_op1);
5694 	  inner = simplify_binary_operation (code == MINUS ? PLUS
5695 					     : code == DIV ? MULT
5696 					     : code,
5697 					     mode, inner_op0, inner_op1);
5698 
5699 	  /* For commutative operations, try the other pair if that one
5700 	     didn't simplify.  */
5701 	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5702 	    {
5703 	      other = XEXP (XEXP (x, 0), 1);
5704 	      inner = simplify_binary_operation (code, mode,
5705 						 XEXP (XEXP (x, 0), 0),
5706 						 XEXP (x, 1));
5707 	    }
5708 
5709 	  if (inner)
5710 	    return simplify_gen_binary (code, mode, other, inner);
5711 	}
5712     }
5713 
5714   /* A little bit of algebraic simplification here.  */
5715   switch (code)
5716     {
5717     case MEM:
5718       /* Ensure that our address has any ASHIFTs converted to MULT in case
5719 	 address-recognizing predicates are called later.  */
5720       temp = make_compound_operation (XEXP (x, 0), MEM);
5721       SUBST (XEXP (x, 0), temp);
5722       break;
5723 
5724     case SUBREG:
5725       if (op0_mode == VOIDmode)
5726 	op0_mode = GET_MODE (SUBREG_REG (x));
5727 
5728       /* See if this can be moved to simplify_subreg.  */
5729       if (CONSTANT_P (SUBREG_REG (x))
5730 	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5731 	     /* Don't call gen_lowpart if the inner mode
5732 		is VOIDmode and we cannot simplify it, as SUBREG without
5733 		inner mode is invalid.  */
5734 	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5735 	      || gen_lowpart_common (mode, SUBREG_REG (x))))
5736 	return gen_lowpart (mode, SUBREG_REG (x));
5737 
5738       if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5739 	break;
5740       {
5741 	rtx temp;
5742 	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5743 				SUBREG_BYTE (x));
5744 	if (temp)
5745 	  return temp;
5746 
5747 	/* If op is known to have all lower bits zero, the result is zero.  */
5748 	if (!in_dest
5749 	    && SCALAR_INT_MODE_P (mode)
5750 	    && SCALAR_INT_MODE_P (op0_mode)
5751 	    && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
5752 	    && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5753 	    && HWI_COMPUTABLE_MODE_P (op0_mode)
5754 	    && (nonzero_bits (SUBREG_REG (x), op0_mode)
5755 		& GET_MODE_MASK (mode)) == 0)
5756 	  return CONST0_RTX (mode);
5757       }
5758 
5759       /* Don't change the mode of the MEM if that would change the meaning
5760 	 of the address.  */
5761       if (MEM_P (SUBREG_REG (x))
5762 	  && (MEM_VOLATILE_P (SUBREG_REG (x))
5763 	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5764 					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
5765 	return gen_rtx_CLOBBER (mode, const0_rtx);
5766 
5767       /* Note that we cannot do any narrowing for non-constants since
5768 	 we might have been counting on using the fact that some bits were
5769 	 zero.  We now do this in the SET.  */
5770 
5771       break;
5772 
5773     case NEG:
5774       temp = expand_compound_operation (XEXP (x, 0));
5775 
5776       /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5777 	 replaced by (lshiftrt X C).  This will convert
5778 	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
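      /* Illustrative example (32-bit mode assumed): (ashiftrt X 31) is
	 either 0 or -1, so (neg (ashiftrt X 31)) is 0 or 1, which is
	 exactly (lshiftrt X 31).  */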
5779 
5780       if (GET_CODE (temp) == ASHIFTRT
5781 	  && CONST_INT_P (XEXP (temp, 1))
5782 	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5783 	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5784 				     INTVAL (XEXP (temp, 1)));
5785 
5786       /* If X has only a single bit that might be nonzero, say, bit I, convert
5787 	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5788 	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
5789 	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
5790 	 or a SUBREG of one since we'd be making the expression more
5791 	 complex if it was just a register.  */
5792 
5793       if (!REG_P (temp)
5794 	  && ! (GET_CODE (temp) == SUBREG
5795 		&& REG_P (SUBREG_REG (temp)))
5796 	  && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5797 	{
5798 	  rtx temp1 = simplify_shift_const
5799 	    (NULL_RTX, ASHIFTRT, mode,
5800 	     simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5801 				   GET_MODE_PRECISION (mode) - 1 - i),
5802 	     GET_MODE_PRECISION (mode) - 1 - i);
5803 
5804 	  /* If all we did was surround TEMP with the two shifts, we
5805 	     haven't improved anything, so don't use it.  Otherwise,
5806 	     we are better off with TEMP1.  */
5807 	  if (GET_CODE (temp1) != ASHIFTRT
5808 	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5809 	      || XEXP (XEXP (temp1, 0), 0) != temp)
5810 	    return temp1;
5811 	}
5812       break;
5813 
5814     case TRUNCATE:
5815       /* We can't handle truncation to a partial integer mode here
5816 	 because we don't know the real bitsize of the partial
5817 	 integer mode.  */
5818       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5819 	break;
5820 
5821       if (HWI_COMPUTABLE_MODE_P (mode))
5822 	SUBST (XEXP (x, 0),
5823 	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5824 			      GET_MODE_MASK (mode), 0));
5825 
5826       /* We can truncate a constant value and return it.  */
5827       if (CONST_INT_P (XEXP (x, 0)))
5828 	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5829 
5830       /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5831 	 whose value is a comparison can be replaced with a subreg if
5832 	 STORE_FLAG_VALUE permits.  */
5833       if (HWI_COMPUTABLE_MODE_P (mode)
5834 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5835 	  && (temp = get_last_value (XEXP (x, 0)))
5836 	  && COMPARISON_P (temp))
5837 	return gen_lowpart (mode, XEXP (x, 0));
5838       break;
5839 
5840     case CONST:
5841       /* (const (const X)) can become (const X).  Do it this way rather than
5842 	 returning the inner CONST since CONST can be shared with a
5843 	 REG_EQUAL note.  */
5844       if (GET_CODE (XEXP (x, 0)) == CONST)
5845 	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5846       break;
5847 
5848     case LO_SUM:
5849       /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
5850 	 can add in an offset.  find_split_point will split this address up
5851 	 again if it doesn't match.  */
5852       if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5853 	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5854 	return XEXP (x, 1);
5855       break;
5856 
5857     case PLUS:
5858       /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5859 	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5860 	 bit-field and can be replaced by either a sign_extend or a
5861 	 sign_extract.  The `and' may be a zero_extend and the two
5862 	 <c>, -<c> constants may be reversed.  */
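      /* Illustrative example (hypothetical 32-bit mode): with an 8-bit
	 field, (plus (xor (and X (const_int 255)) (const_int 128))
	 (const_int -128)) sign-extends the low byte of X and is rewritten
	 below as (ashiftrt (ashift X 24) 24).  */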
5863       if (GET_CODE (XEXP (x, 0)) == XOR
5864 	  && CONST_INT_P (XEXP (x, 1))
5865 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5866 	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5867 	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5868 	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5869 	  && HWI_COMPUTABLE_MODE_P (mode)
5870 	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5871 	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5872 	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5873 		   == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5874 	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5875 		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5876 		      == (unsigned int) i + 1))))
5877 	return simplify_shift_const
5878 	  (NULL_RTX, ASHIFTRT, mode,
5879 	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
5880 				 XEXP (XEXP (XEXP (x, 0), 0), 0),
5881 				 GET_MODE_PRECISION (mode) - (i + 1)),
5882 	   GET_MODE_PRECISION (mode) - (i + 1));
5883 
5884       /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5885 	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5886 	 the bitsize of the mode - 1.  This allows simplification of
5887 	 "a = (b & 8) == 0;"  */
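      /* Illustrative example (32-bit mode assumed): when X is known to be
	 0 or 1, (plus X (const_int -1)) becomes
	 (ashiftrt (ashift (xor X (const_int 1)) 31) 31), i.e. -1 when X
	 is 0 and 0 when X is 1.  */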
5888       if (XEXP (x, 1) == constm1_rtx
5889 	  && !REG_P (XEXP (x, 0))
5890 	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5891 		&& REG_P (SUBREG_REG (XEXP (x, 0))))
5892 	  && nonzero_bits (XEXP (x, 0), mode) == 1)
5893 	return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5894 	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
5895 				 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5896 				 GET_MODE_PRECISION (mode) - 1),
5897 	   GET_MODE_PRECISION (mode) - 1);
5898 
5899       /* If we are adding two things that have no bits in common, convert
5900 	 the addition into an IOR.  This will often be further simplified,
5901 	 for example in cases like ((a & 1) + (a & 2)), which can
5902 	 become a & 3.  */
5903 
5904       if (HWI_COMPUTABLE_MODE_P (mode)
5905 	  && (nonzero_bits (XEXP (x, 0), mode)
5906 	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
5907 	{
5908 	  /* Try to simplify the expression further.  */
5909 	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5910 	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5911 
5912 	  /* If we could, great.  If not, do not go ahead with the IOR
5913 	     replacement, since PLUS appears in many special purpose
5914 	     address arithmetic instructions.  */
5915 	  if (GET_CODE (temp) != CLOBBER
5916 	      && (GET_CODE (temp) != IOR
5917 		  || ((XEXP (temp, 0) != XEXP (x, 0)
5918 		       || XEXP (temp, 1) != XEXP (x, 1))
5919 		      && (XEXP (temp, 0) != XEXP (x, 1)
5920 			  || XEXP (temp, 1) != XEXP (x, 0)))))
5921 	    return temp;
5922 	}
5923 
5924       /* Canonicalize x + x into x << 1.  */
5925       if (GET_MODE_CLASS (mode) == MODE_INT
5926 	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
5927 	  && !side_effects_p (XEXP (x, 0)))
5928 	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
5929 
5930       break;
5931 
5932     case MINUS:
5933       /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5934 	 (and <foo> (const_int pow2-1))  */
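      /* Illustrative example: (minus X (and X (const_int -8))) keeps only
	 the low three bits of X and is rewritten as
	 (and X (const_int 7)).  */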
5935       if (GET_CODE (XEXP (x, 1)) == AND
5936 	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5937 	  && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5938 	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5939 	return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5940 				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5941       break;
5942 
5943     case MULT:
5944       /* If we have (mult (plus A B) C), apply the distributive law and then
5945 	 the inverse distributive law to see if things simplify.  This
5946 	 occurs mostly in addresses, often when unrolling loops.  */
5947 
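      /* For instance, (mult (plus r1 (const_int 4)) (const_int 8)) distributes
	 to (plus (mult r1 (const_int 8)) (const_int 32)), which often matches
	 an addressing mode directly.  */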
5948       if (GET_CODE (XEXP (x, 0)) == PLUS)
5949 	{
5950 	  rtx result = distribute_and_simplify_rtx (x, 0);
5951 	  if (result)
5952 	    return result;
5953 	}
5954 
5955       /* Try to simplify a*(b/c) as (a*b)/c.  */
5956       if (FLOAT_MODE_P (mode) && flag_associative_math
5957 	  && GET_CODE (XEXP (x, 0)) == DIV)
5958 	{
5959 	  rtx tem = simplify_binary_operation (MULT, mode,
5960 					       XEXP (XEXP (x, 0), 0),
5961 					       XEXP (x, 1));
5962 	  if (tem)
5963 	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5964 	}
5965       break;
5966 
5967     case UDIV:
5968       /* If this is a divide by a power of two, treat it as a shift if
5969 	 its first operand is a shift.  */
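      /* For example, (udiv (lshiftrt X (const_int 2)) (const_int 4)) is
	 handled as a further logical right shift by 2, which
	 simplify_shift_const can merge into (lshiftrt X (const_int 4)).  */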
5970       if (CONST_INT_P (XEXP (x, 1))
5971 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5972 	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
5973 	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5974 	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5975 	      || GET_CODE (XEXP (x, 0)) == ROTATE
5976 	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
5977 	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5978       break;
5979 
5980     case EQ:  case NE:
5981     case GT:  case GTU:  case GE:  case GEU:
5982     case LT:  case LTU:  case LE:  case LEU:
5983     case UNEQ:  case LTGT:
5984     case UNGT:  case UNGE:
5985     case UNLT:  case UNLE:
5986     case UNORDERED: case ORDERED:
5987       /* If the first operand is a condition code, we can't do anything
5988 	 with it.  */
5989       if (GET_CODE (XEXP (x, 0)) == COMPARE
5990 	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5991 	      && ! CC0_P (XEXP (x, 0))))
5992 	{
5993 	  rtx op0 = XEXP (x, 0);
5994 	  rtx op1 = XEXP (x, 1);
5995 	  enum rtx_code new_code;
5996 
5997 	  if (GET_CODE (op0) == COMPARE)
5998 	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5999 
6000 	  /* Simplify our comparison, if possible.  */
6001 	  new_code = simplify_comparison (code, &op0, &op1);
6002 
6003 	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6004 	     if only the low-order bit is possibly nonzero in X (such as when
6005 	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
6006 	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
6007 	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
6008 	     (plus X 1).
6009 
6010 	     Remove any ZERO_EXTRACT we made when thinking this was a
6011 	     comparison.  It may now be simpler to use, e.g., an AND.  If a
6012 	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
6013 	     the call to make_compound_operation in the SET case.
6014 
6015 	     Don't apply these optimizations if the caller would
6016 	     prefer a comparison rather than a value.
6017 	     E.g., for the condition in an IF_THEN_ELSE most targets need
6018 	     an explicit comparison.  */
6019 
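	  /* For instance, with STORE_FLAG_VALUE == 1 and OP0 equal to
	     (and Y (const_int 1)), (ne OP0 (const_int 0)) simplifies to OP0
	     itself and (eq OP0 (const_int 0)) to (xor OP0 (const_int 1)).  */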
6020 	  if (in_cond)
6021 	    ;
6022 
6023 	  else if (STORE_FLAG_VALUE == 1
6024 	      && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6025 	      && op1 == const0_rtx
6026 	      && mode == GET_MODE (op0)
6027 	      && nonzero_bits (op0, mode) == 1)
6028 	    return gen_lowpart (mode,
6029 				expand_compound_operation (op0));
6030 
6031 	  else if (STORE_FLAG_VALUE == 1
6032 		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6033 		   && op1 == const0_rtx
6034 		   && mode == GET_MODE (op0)
6035 		   && (num_sign_bit_copies (op0, mode)
6036 		       == GET_MODE_PRECISION (mode)))
6037 	    {
6038 	      op0 = expand_compound_operation (op0);
6039 	      return simplify_gen_unary (NEG, mode,
6040 					 gen_lowpart (mode, op0),
6041 					 mode);
6042 	    }
6043 
6044 	  else if (STORE_FLAG_VALUE == 1
6045 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6046 		   && op1 == const0_rtx
6047 		   && mode == GET_MODE (op0)
6048 		   && nonzero_bits (op0, mode) == 1)
6049 	    {
6050 	      op0 = expand_compound_operation (op0);
6051 	      return simplify_gen_binary (XOR, mode,
6052 					  gen_lowpart (mode, op0),
6053 					  const1_rtx);
6054 	    }
6055 
6056 	  else if (STORE_FLAG_VALUE == 1
6057 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6058 		   && op1 == const0_rtx
6059 		   && mode == GET_MODE (op0)
6060 		   && (num_sign_bit_copies (op0, mode)
6061 		       == GET_MODE_PRECISION (mode)))
6062 	    {
6063 	      op0 = expand_compound_operation (op0);
6064 	      return plus_constant (mode, gen_lowpart (mode, op0), 1);
6065 	    }
6066 
6067 	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
6068 	     those above.  */
6069 	  if (in_cond)
6070 	    ;
6071 
6072 	  else if (STORE_FLAG_VALUE == -1
6073 		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6074 		   && op1 == const0_rtx
6075 		   && mode == GET_MODE (op0)
6076 		   && (num_sign_bit_copies (op0, mode)
6077 		       == GET_MODE_PRECISION (mode)))
6078 	    return gen_lowpart (mode,
6079 				expand_compound_operation (op0));
6080 
6081 	  else if (STORE_FLAG_VALUE == -1
6082 		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6083 		   && op1 == const0_rtx
6084 		   && mode == GET_MODE (op0)
6085 		   && nonzero_bits (op0, mode) == 1)
6086 	    {
6087 	      op0 = expand_compound_operation (op0);
6088 	      return simplify_gen_unary (NEG, mode,
6089 					 gen_lowpart (mode, op0),
6090 					 mode);
6091 	    }
6092 
6093 	  else if (STORE_FLAG_VALUE == -1
6094 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6095 		   && op1 == const0_rtx
6096 		   && mode == GET_MODE (op0)
6097 		   && (num_sign_bit_copies (op0, mode)
6098 		       == GET_MODE_PRECISION (mode)))
6099 	    {
6100 	      op0 = expand_compound_operation (op0);
6101 	      return simplify_gen_unary (NOT, mode,
6102 					 gen_lowpart (mode, op0),
6103 					 mode);
6104 	    }
6105 
6106 	  /* If X is 0/1, (eq X 0) is X-1.  */
6107 	  else if (STORE_FLAG_VALUE == -1
6108 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6109 		   && op1 == const0_rtx
6110 		   && mode == GET_MODE (op0)
6111 		   && nonzero_bits (op0, mode) == 1)
6112 	    {
6113 	      op0 = expand_compound_operation (op0);
6114 	      return plus_constant (mode, gen_lowpart (mode, op0), -1);
6115 	    }
6116 
6117 	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6118 	     one bit that might be nonzero, we can convert (ne x 0) to
6119 	     (ashift x c) where C puts the bit in the sign bit.  Remove any
6120 	     AND with STORE_FLAG_VALUE when we are done, since we are only
6121 	     going to test the sign bit.  */
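	  /* E.g. in SImode, when STORE_FLAG_VALUE is just the sign bit and
	     only bit 3 of X can be nonzero, (ne X 0) becomes
	     (ashift X (const_int 28)), moving that bit into the sign
	     position.  */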
6122 	  if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6123 	      && HWI_COMPUTABLE_MODE_P (mode)
6124 	      && val_signbit_p (mode, STORE_FLAG_VALUE)
6125 	      && op1 == const0_rtx
6126 	      && mode == GET_MODE (op0)
6127 	      && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
6128 	    {
6129 	      x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6130 					expand_compound_operation (op0),
6131 					GET_MODE_PRECISION (mode) - 1 - i);
6132 	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6133 		return XEXP (x, 0);
6134 	      else
6135 		return x;
6136 	    }
6137 
6138 	  /* If the code changed, return a whole new comparison.
6139 	     We also need to avoid using SUBST in cases where
6140 	     simplify_comparison has widened a comparison with a CONST_INT,
6141 	     since in that case the wider CONST_INT may fail the sanity
6142 	     checks in do_SUBST.  */
6143 	  if (new_code != code
6144 	      || (CONST_INT_P (op1)
6145 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6146 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6147 	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6148 
6149 	  /* Otherwise, keep this operation, but maybe change its operands.
6150 	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
6151 	  SUBST (XEXP (x, 0), op0);
6152 	  SUBST (XEXP (x, 1), op1);
6153 	}
6154       break;
6155 
6156     case IF_THEN_ELSE:
6157       return simplify_if_then_else (x);
6158 
6159     case ZERO_EXTRACT:
6160     case SIGN_EXTRACT:
6161     case ZERO_EXTEND:
6162     case SIGN_EXTEND:
6163       /* If we are processing SET_DEST, we are done.  */
6164       if (in_dest)
6165 	return x;
6166 
6167       return expand_compound_operation (x);
6168 
6169     case SET:
6170       return simplify_set (x);
6171 
6172     case AND:
6173     case IOR:
6174       return simplify_logical (x);
6175 
6176     case ASHIFT:
6177     case LSHIFTRT:
6178     case ASHIFTRT:
6179     case ROTATE:
6180     case ROTATERT:
6181       /* If this is a shift by a constant amount, simplify it.  */
6182       if (CONST_INT_P (XEXP (x, 1)))
6183 	return simplify_shift_const (x, code, mode, XEXP (x, 0),
6184 				     INTVAL (XEXP (x, 1)));
6185 
6186       else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6187 	SUBST (XEXP (x, 1),
6188 	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6189 			      ((unsigned HOST_WIDE_INT) 1
6190 			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6191 			      - 1,
6192 			      0));
6193       break;
6194 
6195     default:
6196       break;
6197     }
6198 
6199   return x;
6200 }
6201 
6202 /* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */
6203 
6204 static rtx
6205 simplify_if_then_else (rtx x)
6206 {
6207   machine_mode mode = GET_MODE (x);
6208   rtx cond = XEXP (x, 0);
6209   rtx true_rtx = XEXP (x, 1);
6210   rtx false_rtx = XEXP (x, 2);
6211   enum rtx_code true_code = GET_CODE (cond);
6212   int comparison_p = COMPARISON_P (cond);
6213   rtx temp;
6214   int i;
6215   enum rtx_code false_code;
6216   rtx reversed;
6217 
6218   /* Simplify storing of the truth value.  */
6219   if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6220     return simplify_gen_relational (true_code, mode, VOIDmode,
6221 				    XEXP (cond, 0), XEXP (cond, 1));
6222 
6223   /* Also when the truth value has to be reversed.  */
6224   if (comparison_p
6225       && true_rtx == const0_rtx && false_rtx == const_true_rtx
6226       && (reversed = reversed_comparison (cond, mode)))
6227     return reversed;
6228 
6229   /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6230      in it is being compared against certain values.  Get the true and false
6231      comparisons and see if that says anything about the value of each arm.  */
6232 
6233   if (comparison_p
6234       && ((false_code = reversed_comparison_code (cond, NULL))
6235 	  != UNKNOWN)
6236       && REG_P (XEXP (cond, 0)))
6237     {
6238       HOST_WIDE_INT nzb;
6239       rtx from = XEXP (cond, 0);
6240       rtx true_val = XEXP (cond, 1);
6241       rtx false_val = true_val;
6242       int swapped = 0;
6243 
6244       /* If FALSE_CODE is EQ, swap the codes and arms.  */
6245 
6246       if (false_code == EQ)
6247 	{
6248 	  swapped = 1, true_code = EQ, false_code = NE;
6249 	  std::swap (true_rtx, false_rtx);
6250 	}
6251 
6252       /* If we are comparing against zero and the expression being tested has
6253 	 only a single bit that might be nonzero, that is its value when it is
6254 	 not equal to zero.  Similarly if it is known to be -1 or 0.  */
6255 
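      /* For instance, if FROM is known to be either 0 or 8 and the condition
	 is (eq FROM (const_int 0)), FROM must be 8 in the false arm, so 8
	 can be substituted for FROM there.  */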
6256       if (true_code == EQ && true_val == const0_rtx
6257 	  && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
6258 	{
6259 	  false_code = EQ;
6260 	  false_val = gen_int_mode (nzb, GET_MODE (from));
6261 	}
6262       else if (true_code == EQ && true_val == const0_rtx
6263 	       && (num_sign_bit_copies (from, GET_MODE (from))
6264 		   == GET_MODE_PRECISION (GET_MODE (from))))
6265 	{
6266 	  false_code = EQ;
6267 	  false_val = constm1_rtx;
6268 	}
6269 
6270       /* Now simplify an arm if we know the value of the register in the
6271 	 branch and it is used in the arm.  Be careful due to the potential
6272 	 of locally-shared RTL.  */
6273 
6274       if (reg_mentioned_p (from, true_rtx))
6275 	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6276 				      from, true_val),
6277 			  pc_rtx, pc_rtx, 0, 0, 0);
6278       if (reg_mentioned_p (from, false_rtx))
6279 	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6280 				       from, false_val),
6281 			   pc_rtx, pc_rtx, 0, 0, 0);
6282 
6283       SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6284       SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6285 
6286       true_rtx = XEXP (x, 1);
6287       false_rtx = XEXP (x, 2);
6288       true_code = GET_CODE (cond);
6289     }
6290 
6291   /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6292      reversed, do so to avoid needing two sets of patterns for
6293      subtract-and-branch insns.  Similarly if we have a constant in the true
6294      arm, the false arm is the same as the first operand of the comparison, or
6295      the false arm is more complicated than the true arm.  */
6296 
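  /* For example, (if_then_else (eq A B) (pc) (label_ref L)) is rewritten as
     (if_then_else (ne A B) (label_ref L) (pc)), so the target does not need
     separate patterns for both arm orders.  */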
6297   if (comparison_p
6298       && reversed_comparison_code (cond, NULL) != UNKNOWN
6299       && (true_rtx == pc_rtx
6300 	  || (CONSTANT_P (true_rtx)
6301 	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6302 	  || true_rtx == const0_rtx
6303 	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6304 	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6305 	      && !OBJECT_P (false_rtx))
6306 	  || reg_mentioned_p (true_rtx, false_rtx)
6307 	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6308     {
6309       true_code = reversed_comparison_code (cond, NULL);
6310       SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6311       SUBST (XEXP (x, 1), false_rtx);
6312       SUBST (XEXP (x, 2), true_rtx);
6313 
6314       std::swap (true_rtx, false_rtx);
6315       cond = XEXP (x, 0);
6316 
6317       /* It is possible that the conditional has been simplified out.  */
6318       true_code = GET_CODE (cond);
6319       comparison_p = COMPARISON_P (cond);
6320     }
6321 
6322   /* If the two arms are identical, we don't need the comparison.  */
6323 
6324   if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6325     return true_rtx;
6326 
6327   /* Convert a == b ? b : a to "a".  */
6328   if (true_code == EQ && ! side_effects_p (cond)
6329       && !HONOR_NANS (mode)
6330       && rtx_equal_p (XEXP (cond, 0), false_rtx)
6331       && rtx_equal_p (XEXP (cond, 1), true_rtx))
6332     return false_rtx;
6333   else if (true_code == NE && ! side_effects_p (cond)
6334 	   && !HONOR_NANS (mode)
6335 	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
6336 	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
6337     return true_rtx;
6338 
6339   /* Look for cases where we have (abs x) or (neg (abs X)).  */
6340 
6341   if (GET_MODE_CLASS (mode) == MODE_INT
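  /* E.g. (if_then_else (ge X (const_int 0)) X (neg X)) becomes (abs X),
     and with LT or LE the result is (neg (abs X)) instead.  */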
6342       && comparison_p
6343       && XEXP (cond, 1) == const0_rtx
6344       && GET_CODE (false_rtx) == NEG
6345       && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6346       && rtx_equal_p (true_rtx, XEXP (cond, 0))
6347       && ! side_effects_p (true_rtx))
6348     switch (true_code)
6349       {
6350       case GT:
6351       case GE:
6352 	return simplify_gen_unary (ABS, mode, true_rtx, mode);
6353       case LT:
6354       case LE:
6355 	return
6356 	  simplify_gen_unary (NEG, mode,
6357 			      simplify_gen_unary (ABS, mode, true_rtx, mode),
6358 			      mode);
6359       default:
6360 	break;
6361       }
6362 
6363   /* Look for MIN or MAX.  */
6364 
6365   if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
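  /* E.g. (if_then_else (gt A B) A B) becomes (smax A B), and
     (if_then_else (ltu A B) A B) becomes (umin A B).  */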
6366       && comparison_p
6367       && rtx_equal_p (XEXP (cond, 0), true_rtx)
6368       && rtx_equal_p (XEXP (cond, 1), false_rtx)
6369       && ! side_effects_p (cond))
6370     switch (true_code)
6371       {
6372       case GE:
6373       case GT:
6374 	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6375       case LE:
6376       case LT:
6377 	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6378       case GEU:
6379       case GTU:
6380 	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6381       case LEU:
6382       case LTU:
6383 	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6384       default:
6385 	break;
6386       }
6387 
6388   /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6389      second operand is zero, this can be done as (OP Z (mult COND C2)) where
6390      C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6391      SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6392      We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6393      neither 1 nor -1, but it isn't worth checking for.  */
6394 
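  /* For example, with STORE_FLAG_VALUE == 1,
     (if_then_else (ne A B) (plus Z (const_int 4)) Z) can be rewritten as
     (plus Z (mult (ne A B) (const_int 4))): the multiply is 4 when the
     condition holds and 0 when it does not.  */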
6395   if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6396       && comparison_p
6397       && GET_MODE_CLASS (mode) == MODE_INT
6398       && ! side_effects_p (x))
6399     {
6400       rtx t = make_compound_operation (true_rtx, SET);
6401       rtx f = make_compound_operation (false_rtx, SET);
6402       rtx cond_op0 = XEXP (cond, 0);
6403       rtx cond_op1 = XEXP (cond, 1);
6404       enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6405       machine_mode m = mode;
6406       rtx z = 0, c1 = NULL_RTX;
6407 
6408       if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6409 	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6410 	   || GET_CODE (t) == ASHIFT
6411 	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6412 	  && rtx_equal_p (XEXP (t, 0), f))
6413 	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6414 
6415       /* If an identity-zero op is commutative, check whether there
6416 	 would be a match if we swapped the operands.  */
6417       else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6418 		|| GET_CODE (t) == XOR)
6419 	       && rtx_equal_p (XEXP (t, 1), f))
6420 	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6421       else if (GET_CODE (t) == SIGN_EXTEND
6422 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6423 		   || GET_CODE (XEXP (t, 0)) == MINUS
6424 		   || GET_CODE (XEXP (t, 0)) == IOR
6425 		   || GET_CODE (XEXP (t, 0)) == XOR
6426 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6427 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6428 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6429 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6430 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6431 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6432 	       && (num_sign_bit_copies (f, GET_MODE (f))
6433 		   > (unsigned int)
6434 		     (GET_MODE_PRECISION (mode)
6435 		      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6436 	{
6437 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6438 	  extend_op = SIGN_EXTEND;
6439 	  m = GET_MODE (XEXP (t, 0));
6440 	}
6441       else if (GET_CODE (t) == SIGN_EXTEND
6442 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6443 		   || GET_CODE (XEXP (t, 0)) == IOR
6444 		   || GET_CODE (XEXP (t, 0)) == XOR)
6445 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6446 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6447 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6448 	       && (num_sign_bit_copies (f, GET_MODE (f))
6449 		   > (unsigned int)
6450 		     (GET_MODE_PRECISION (mode)
6451 		      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6452 	{
6453 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6454 	  extend_op = SIGN_EXTEND;
6455 	  m = GET_MODE (XEXP (t, 0));
6456 	}
6457       else if (GET_CODE (t) == ZERO_EXTEND
6458 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6459 		   || GET_CODE (XEXP (t, 0)) == MINUS
6460 		   || GET_CODE (XEXP (t, 0)) == IOR
6461 		   || GET_CODE (XEXP (t, 0)) == XOR
6462 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6463 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6464 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6465 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6466 	       && HWI_COMPUTABLE_MODE_P (mode)
6467 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6468 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6469 	       && ((nonzero_bits (f, GET_MODE (f))
6470 		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6471 		   == 0))
6472 	{
6473 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6474 	  extend_op = ZERO_EXTEND;
6475 	  m = GET_MODE (XEXP (t, 0));
6476 	}
6477       else if (GET_CODE (t) == ZERO_EXTEND
6478 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6479 		   || GET_CODE (XEXP (t, 0)) == IOR
6480 		   || GET_CODE (XEXP (t, 0)) == XOR)
6481 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6482 	       && HWI_COMPUTABLE_MODE_P (mode)
6483 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6484 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6485 	       && ((nonzero_bits (f, GET_MODE (f))
6486 		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6487 		   == 0))
6488 	{
6489 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6490 	  extend_op = ZERO_EXTEND;
6491 	  m = GET_MODE (XEXP (t, 0));
6492 	}
6493 
6494       if (z)
6495 	{
6496 	  temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6497 						 cond_op0, cond_op1),
6498 			pc_rtx, pc_rtx, 0, 0, 0);
6499 	  temp = simplify_gen_binary (MULT, m, temp,
6500 				      simplify_gen_binary (MULT, m, c1,
6501 							   const_true_rtx));
6502 	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6503 	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6504 
6505 	  if (extend_op != UNKNOWN)
6506 	    temp = simplify_gen_unary (extend_op, mode, temp, m);
6507 
6508 	  return temp;
6509 	}
6510     }
6511 
6512   /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6513      1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6514      negation of a single bit, we can convert this operation to a shift.  We
6515      can actually do this more generally, but it doesn't seem worth it.  */
6516 
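  /* E.g. if A is known to be 0 or 1,
     (if_then_else (ne A 0) (const_int 8) (const_int 0)) is just
     (ashift A (const_int 3)).  */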
6517   if (true_code == NE && XEXP (cond, 1) == const0_rtx
6518       && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6519       && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6520 	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6521 	  || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6522 	       == GET_MODE_PRECISION (mode))
6523 	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6524     return
6525       simplify_shift_const (NULL_RTX, ASHIFT, mode,
6526 			    gen_lowpart (mode, XEXP (cond, 0)), i);
6527 
6528   /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG for nonzero_bits (REG) == 8.  */
6529   if (true_code == NE && XEXP (cond, 1) == const0_rtx
6530       && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6531       && GET_MODE (XEXP (cond, 0)) == mode
6532       && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6533 	  == nonzero_bits (XEXP (cond, 0), mode)
6534       && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6535     return XEXP (cond, 0);
6536 
6537   return x;
6538 }
6539 
6540 /* Simplify X, a SET expression.  Return the new expression.  */
6541 
6542 static rtx
6543 simplify_set (rtx x)
6544 {
6545   rtx src = SET_SRC (x);
6546   rtx dest = SET_DEST (x);
6547   machine_mode mode
6548     = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6549   rtx_insn *other_insn;
6550   rtx *cc_use;
6551 
6552   /* (set (pc) (return)) gets written as (return).  */
6553   if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6554     return src;
6555 
6556   /* Now that we know for sure which bits of SRC we are using, see if we can
6557      simplify the expression for the object knowing that we only need the
6558      low-order bits.  */
6559 
6560   if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6561     {
6562       src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6563       SUBST (SET_SRC (x), src);
6564     }
6565 
6566   /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6567      the comparison result and try to simplify it unless we already have used
6568      undobuf.other_insn.  */
6569   if ((GET_MODE_CLASS (mode) == MODE_CC
6570        || GET_CODE (src) == COMPARE
6571        || CC0_P (dest))
6572       && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6573       && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6574       && COMPARISON_P (*cc_use)
6575       && rtx_equal_p (XEXP (*cc_use, 0), dest))
6576     {
6577       enum rtx_code old_code = GET_CODE (*cc_use);
6578       enum rtx_code new_code;
6579       rtx op0, op1, tmp;
6580       int other_changed = 0;
6581       rtx inner_compare = NULL_RTX;
6582       machine_mode compare_mode = GET_MODE (dest);
6583 
6584       if (GET_CODE (src) == COMPARE)
6585 	{
6586 	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6587 	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6588 	    {
6589 	      inner_compare = op0;
6590 	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6591 	    }
6592 	}
6593       else
6594 	op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6595 
6596       tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6597 					   op0, op1);
6598       if (!tmp)
6599 	new_code = old_code;
6600       else if (!CONSTANT_P (tmp))
6601 	{
6602 	  new_code = GET_CODE (tmp);
6603 	  op0 = XEXP (tmp, 0);
6604 	  op1 = XEXP (tmp, 1);
6605 	}
6606       else
6607 	{
6608 	  rtx pat = PATTERN (other_insn);
6609 	  undobuf.other_insn = other_insn;
6610 	  SUBST (*cc_use, tmp);
6611 
6612 	  /* Attempt to simplify CC user.  */
6613 	  if (GET_CODE (pat) == SET)
6614 	    {
6615 	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
6616 	      if (new_rtx != NULL_RTX)
6617 		SUBST (SET_SRC (pat), new_rtx);
6618 	    }
6619 
6620 	  /* Convert X into a no-op move.  */
6621 	  SUBST (SET_DEST (x), pc_rtx);
6622 	  SUBST (SET_SRC (x), pc_rtx);
6623 	  return x;
6624 	}
6625 
6626       /* Simplify our comparison, if possible.  */
6627       new_code = simplify_comparison (new_code, &op0, &op1);
6628 
6629 #ifdef SELECT_CC_MODE
6630       /* If this machine has CC modes other than CCmode, check to see if we
6631 	 need to use a different CC mode here.  */
6632       if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6633 	compare_mode = GET_MODE (op0);
6634       else if (inner_compare
6635 	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6636 	       && new_code == old_code
6637 	       && op0 == XEXP (inner_compare, 0)
6638 	       && op1 == XEXP (inner_compare, 1))
6639 	compare_mode = GET_MODE (inner_compare);
6640       else
6641 	compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6642 
6643       /* If the mode changed, we have to change SET_DEST, the mode in the
6644 	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
6645 	 a hard register, just build new versions with the proper mode.  If it
6646 	 is a pseudo, we lose unless this is the only time we set the pseudo, in
6647 	 which case we can safely change its mode.  */
6648       if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6649 	{
6650 	  if (can_change_dest_mode (dest, 0, compare_mode))
6651 	    {
6652 	      unsigned int regno = REGNO (dest);
6653 	      rtx new_dest;
6654 
6655 	      if (regno < FIRST_PSEUDO_REGISTER)
6656 		new_dest = gen_rtx_REG (compare_mode, regno);
6657 	      else
6658 		{
6659 		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6660 		  new_dest = regno_reg_rtx[regno];
6661 		}
6662 
6663 	      SUBST (SET_DEST (x), new_dest);
6664 	      SUBST (XEXP (*cc_use, 0), new_dest);
6665 	      other_changed = 1;
6666 
6667 	      dest = new_dest;
6668 	    }
6669 	}
6670 #endif  /* SELECT_CC_MODE */
6671 
6672       /* If the code changed, we have to build a new comparison in
6673 	 undobuf.other_insn.  */
6674       if (new_code != old_code)
6675 	{
6676 	  int other_changed_previously = other_changed;
6677 	  unsigned HOST_WIDE_INT mask;
6678 	  rtx old_cc_use = *cc_use;
6679 
6680 	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6681 					  dest, const0_rtx));
6682 	  other_changed = 1;
6683 
6684 	  /* If the only change we made was to change an EQ into an NE or
6685 	     vice versa, OP0 has only one bit that might be nonzero, and OP1
6686 	     is zero, check if changing the user of the condition code will
6687 	     produce a valid insn.  If it won't, we can keep the original code
6688 	     in that insn by surrounding our operation with an XOR.  */
6689 
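	  /* For example, if OP0 can only be 0 or 4, (ne OP0 (const_int 0))
	     is equivalent to (eq (xor OP0 (const_int 4)) (const_int 0)), so
	     XORing OP0 with its single nonzero bit lets the user insn keep
	     its original comparison code.  */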
6690 	  if (((old_code == NE && new_code == EQ)
6691 	       || (old_code == EQ && new_code == NE))
6692 	      && ! other_changed_previously && op1 == const0_rtx
6693 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6694 	      && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6695 	    {
6696 	      rtx pat = PATTERN (other_insn), note = 0;
6697 
6698 	      if ((recog_for_combine (&pat, other_insn, &note) < 0
6699 		   && ! check_asm_operands (pat)))
6700 		{
6701 		  *cc_use = old_cc_use;
6702 		  other_changed = 0;
6703 
6704 		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6705 					     gen_int_mode (mask,
6706 							   GET_MODE (op0)));
6707 		}
6708 	    }
6709 	}
6710 
6711       if (other_changed)
6712 	undobuf.other_insn = other_insn;
6713 
6714       /* Don't generate a compare of a CC with 0, just use that CC.  */
6715       if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6716 	{
6717 	  SUBST (SET_SRC (x), op0);
6718 	  src = SET_SRC (x);
6719 	}
6720       /* Otherwise, if we didn't previously have the same COMPARE we
6721 	 want, create it from scratch.  */
6722       else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6723 	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6724 	{
6725 	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6726 	  src = SET_SRC (x);
6727 	}
6728     }
6729   else
6730     {
6731       /* Get SET_SRC in a form where we have placed back any
6732 	 compound expressions.  Then do the checks below.  */
6733       src = make_compound_operation (src, SET);
6734       SUBST (SET_SRC (x), src);
6735     }
6736 
6737   /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6738      and X being a REG or (subreg (reg)), we may be able to convert this to
6739      (set (subreg:m2 x) (op)).
6740 
6741      We can always do this if M1 is narrower than M2 because that means that
6742      we only care about the low bits of the result.
6743 
6744      However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6745      perform a narrower operation than requested since the high-order bits will
6746      be undefined.  On machines where it is defined, this transformation is safe
6747      as long as M1 and M2 have the same number of words.  */
6748 
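  /* For instance, (set (reg:QI R) (subreg:QI (plus:SI A B) 0)) can become
     (set (subreg:SI (reg:QI R) 0) (plus:SI A B)), since only the low byte
     of the PLUS result matters.  */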
6749   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6750       && !OBJECT_P (SUBREG_REG (src))
6751       && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6752 	   / UNITS_PER_WORD)
6753 	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6754 	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6755       && (WORD_REGISTER_OPERATIONS
6756 	  || (GET_MODE_SIZE (GET_MODE (src))
6757 	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6758 #ifdef CANNOT_CHANGE_MODE_CLASS
6759       && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6760 	    && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6761 					 GET_MODE (SUBREG_REG (src)),
6762 					 GET_MODE (src)))
6763 #endif
6764       && (REG_P (dest)
6765 	  || (GET_CODE (dest) == SUBREG
6766 	      && REG_P (SUBREG_REG (dest)))))
6767     {
6768       SUBST (SET_DEST (x),
6769 	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
6770 				      dest));
6771       SUBST (SET_SRC (x), SUBREG_REG (src));
6772 
6773       src = SET_SRC (x), dest = SET_DEST (x);
6774     }
6775 
6776   /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6777      in SRC.  */
6778   if (dest == cc0_rtx
6779       && GET_CODE (src) == SUBREG
6780       && subreg_lowpart_p (src)
6781       && (GET_MODE_PRECISION (GET_MODE (src))
6782 	  < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6783     {
6784       rtx inner = SUBREG_REG (src);
6785       machine_mode inner_mode = GET_MODE (inner);
6786 
6787       /* Here we make sure that we don't have a sign bit on.  */
6788       if (val_signbit_known_clear_p (GET_MODE (src),
6789 				     nonzero_bits (inner, inner_mode)))
6790 	{
6791 	  SUBST (SET_SRC (x), inner);
6792 	  src = SET_SRC (x);
6793 	}
6794     }
6795 
6796   /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6797      would require a paradoxical subreg.  Replace the subreg with a
6798      zero_extend to avoid the reload that would otherwise be required.  */
6799 
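  /* For example, on a target whose QImode loads zero-extend,
     (set R (subreg:SI (mem:QI ADDR) 0)) is rewritten as
     (set R (zero_extend:SI (mem:QI ADDR))).  */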
6800   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6801       && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6802       && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6803       && SUBREG_BYTE (src) == 0
6804       && paradoxical_subreg_p (src)
6805       && MEM_P (SUBREG_REG (src)))
6806     {
6807       SUBST (SET_SRC (x),
6808 	     gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6809 			    GET_MODE (src), SUBREG_REG (src)));
6810 
6811       src = SET_SRC (x);
6812     }
6813 
6814   /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6815      are comparing an item known to be 0 or -1 against 0, use a logical
6816      operation instead. Check for one of the arms being an IOR of the other
6817      arm with some value.  We compute three terms to be IOR'ed together.  In
6818      practice, at most two will be nonzero.  Then we do the IOR's.  */
6819 
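  /* For instance, if A is known to be 0 or -1,
     (if_then_else (ne A (const_int 0)) (ior B C) B) becomes
     (ior B (and A C)): A acts as a mask that is all ones exactly when the
     condition is true.  */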
6820   if (GET_CODE (dest) != PC
6821       && GET_CODE (src) == IF_THEN_ELSE
6822       && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6823       && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6824       && XEXP (XEXP (src, 0), 1) == const0_rtx
6825       && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6826       && (!HAVE_conditional_move
6827 	  || ! can_conditionally_move_p (GET_MODE (src)))
6828       && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6829 			       GET_MODE (XEXP (XEXP (src, 0), 0)))
6830 	  == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6831       && ! side_effects_p (src))
6832     {
6833       rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6834 		      ? XEXP (src, 1) : XEXP (src, 2));
6835       rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6836 		   ? XEXP (src, 2) : XEXP (src, 1));
6837       rtx term1 = const0_rtx, term2, term3;
6838 
6839       if (GET_CODE (true_rtx) == IOR
6840 	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6841 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6842       else if (GET_CODE (true_rtx) == IOR
6843 	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6844 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6845       else if (GET_CODE (false_rtx) == IOR
6846 	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6847 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6848       else if (GET_CODE (false_rtx) == IOR
6849 	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6850 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6851 
6852       term2 = simplify_gen_binary (AND, GET_MODE (src),
6853 				   XEXP (XEXP (src, 0), 0), true_rtx);
6854       term3 = simplify_gen_binary (AND, GET_MODE (src),
6855 				   simplify_gen_unary (NOT, GET_MODE (src),
6856 						       XEXP (XEXP (src, 0), 0),
6857 						       GET_MODE (src)),
6858 				   false_rtx);
6859 
6860       SUBST (SET_SRC (x),
6861 	     simplify_gen_binary (IOR, GET_MODE (src),
6862 				  simplify_gen_binary (IOR, GET_MODE (src),
6863 						       term1, term2),
6864 				  term3));
6865 
6866       src = SET_SRC (x);
6867     }
6868 
6869   /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6870      whole thing fail.  */
6871   if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6872     return src;
6873   else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6874     return dest;
6875   else
6876     /* Convert this into a field assignment operation, if possible.  */
6877     return make_field_assignment (x);
6878 }
6879 
6880 /* Simplify, X, and AND, IOR, or XOR operation, and return the simplified
6881 /* Simplify X, an AND or IOR operation, and return the simplified
6882 
6883 static rtx
6884 simplify_logical (rtx x)
6885 {
6886   machine_mode mode = GET_MODE (x);
6887   rtx op0 = XEXP (x, 0);
6888   rtx op1 = XEXP (x, 1);
6889 
6890   switch (GET_CODE (x))
6891     {
6892     case AND:
6893       /* We can call simplify_and_const_int only if we don't lose
6894 	 any (sign) bits when converting INTVAL (op1) to
6895 	 "unsigned HOST_WIDE_INT".  */
6896       if (CONST_INT_P (op1)
6897 	  && (HWI_COMPUTABLE_MODE_P (mode)
6898 	      || INTVAL (op1) > 0))
6899 	{
6900 	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6901 	  if (GET_CODE (x) != AND)
6902 	    return x;
6903 
6904 	  op0 = XEXP (x, 0);
6905 	  op1 = XEXP (x, 1);
6906 	}
6907 
6908       /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6909 	 apply the distributive law and then the inverse distributive
6910 	 law to see if things simplify.  */
6911       if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6912 	{
6913 	  rtx result = distribute_and_simplify_rtx (x, 0);
6914 	  if (result)
6915 	    return result;
6916 	}
6917       if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6918 	{
6919 	  rtx result = distribute_and_simplify_rtx (x, 1);
6920 	  if (result)
6921 	    return result;
6922 	}
6923       break;
6924 
6925     case IOR:
6926       /* If we have (ior (and A B) C), apply the distributive law and then
6927 	 the inverse distributive law to see if things simplify.  */
6928 
6929       if (GET_CODE (op0) == AND)
6930 	{
6931 	  rtx result = distribute_and_simplify_rtx (x, 0);
6932 	  if (result)
6933 	    return result;
6934 	}
6935 
6936       if (GET_CODE (op1) == AND)
6937 	{
6938 	  rtx result = distribute_and_simplify_rtx (x, 1);
6939 	  if (result)
6940 	    return result;
6941 	}
6942       break;
6943 
6944     default:
6945       gcc_unreachable ();
6946     }
6947 
6948   return x;
6949 }
6950 
6951 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6952    operations" because they can be replaced with two more basic operations.
6953    ZERO_EXTEND is also considered "compound" because it can be replaced with
6954    an AND operation, which is simpler, though only one operation.
6955 
6956    The function expand_compound_operation is called with an rtx expression
6957    and will convert it to the appropriate shifts and AND operations,
6958    simplifying at each stage.
6959 
6960    The function make_compound_operation is called to convert an expression
6961    consisting of shifts and ANDs into the equivalent compound expression.
6962    It is the inverse of this function, loosely speaking.  */
6963 
6964 static rtx
6965 expand_compound_operation (rtx x)
6966 {
6967   unsigned HOST_WIDE_INT pos = 0, len;
6968   int unsignedp = 0;
6969   unsigned int modewidth;
6970   rtx tem;
6971 
6972   switch (GET_CODE (x))
6973     {
6974     case ZERO_EXTEND:
6975       unsignedp = 1;
6976     case SIGN_EXTEND:
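      /* ... fall through ...  */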
6977       /* We can't necessarily use a const_int for a multiword mode;
6978 	 it depends on implicitly extending the value.
6979 	 Since we don't know the right way to extend it,
6980 	 we can't tell whether the implicit way is right.
6981 
6982 	 Even for a mode that is no wider than a const_int,
6983 	 we can't win, because we need to sign extend one of its bits through
6984 	 the rest of it, and we don't know which bit.  */
6985       if (CONST_INT_P (XEXP (x, 0)))
6986 	return x;
6987 
6988       /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6989 	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
6990 	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6991 	 reloaded.  If not for that, MEMs would very rarely be safe.
6992 
6993 	 Reject MODEs bigger than a word, because we might not be able
6994 	 to reference a two-register group starting with an arbitrary register
6995 	 (and currently gen_lowpart might crash for a SUBREG).  */
6996 
6997       if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6998 	return x;
6999 
7000       /* Reject MODEs that aren't scalar integers because turning vector
7001 	 or complex modes into shifts causes problems.  */
7002 
7003       if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
7004 	return x;
7005 
7006       len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
7007       /* If the inner object has VOIDmode (the only way this can happen
7008 	 is if it is an ASM_OPERANDS), we can't do anything since we don't
7009 	 know how much masking to do.  */
7010       if (len == 0)
7011 	return x;
7012 
7013       break;
7014 
7015     case ZERO_EXTRACT:
7016       unsignedp = 1;
7017 
7018       /* ... fall through ...  */
7019 
7020     case SIGN_EXTRACT:
7021       /* If the operand is a CLOBBER, just return it.  */
7022       if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7023 	return XEXP (x, 0);
7024 
7025       if (!CONST_INT_P (XEXP (x, 1))
7026 	  || !CONST_INT_P (XEXP (x, 2))
7027 	  || GET_MODE (XEXP (x, 0)) == VOIDmode)
7028 	return x;
7029 
7030       /* Reject MODEs that aren't scalar integers because turning vector
7031 	 or complex modes into shifts causes problems.  */
7032 
7033       if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
7034 	return x;
7035 
7036       len = INTVAL (XEXP (x, 1));
7037       pos = INTVAL (XEXP (x, 2));
7038 
7039       /* This should stay within the object being extracted, fail otherwise.  */
7040       if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
7041 	return x;
7042 
7043       if (BITS_BIG_ENDIAN)
7044 	pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
7045 
7046       break;
7047 
7048     default:
7049       return x;
7050     }
7051   /* Convert sign extension to zero extension, if we know that the high
7052      bit is not set, as this is easier to optimize.  It will be converted
7053      back to cheaper alternative in make_extraction.  */
7054   if (GET_CODE (x) == SIGN_EXTEND
7055       && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7056 	  && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
7057 		& ~(((unsigned HOST_WIDE_INT)
7058 		      GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
7059 		     >> 1))
7060 	       == 0)))
7061     {
7062       machine_mode mode = GET_MODE (x);
7063       rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7064       rtx temp2 = expand_compound_operation (temp);
7065 
7066       /* Make sure this is a profitable operation.  */
7067       if (set_src_cost (x, mode, optimize_this_for_speed_p)
7068           > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7069        return temp2;
7070       else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7071                > set_src_cost (temp, mode, optimize_this_for_speed_p))
7072        return temp;
7073       else
7074        return x;
7075     }
7076 
7077   /* We can optimize some special cases of ZERO_EXTEND.  */
7078   if (GET_CODE (x) == ZERO_EXTEND)
7079     {
7080       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7081 	 know that the last value didn't have any inappropriate bits
7082 	 set.  */
7083       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7084 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7085 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7086 	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
7087 	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7088 	return XEXP (XEXP (x, 0), 0);
7089 
7090       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
7091       if (GET_CODE (XEXP (x, 0)) == SUBREG
7092 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7093 	  && subreg_lowpart_p (XEXP (x, 0))
7094 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7095 	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
7096 	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7097 	return SUBREG_REG (XEXP (x, 0));
7098 
7099       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7100 	 is a comparison and STORE_FLAG_VALUE permits.  This is like
7101 	 the first case, but it works even when GET_MODE (x) is larger
7102 	 than HOST_WIDE_INT.  */
7103       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7104 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7105 	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7106 	  && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7107 	      <= HOST_BITS_PER_WIDE_INT)
7108 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7109 	return XEXP (XEXP (x, 0), 0);
7110 
7111       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
7112       if (GET_CODE (XEXP (x, 0)) == SUBREG
7113 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7114 	  && subreg_lowpart_p (XEXP (x, 0))
7115 	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7116 	  && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7117 	      <= HOST_BITS_PER_WIDE_INT)
7118 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7119 	return SUBREG_REG (XEXP (x, 0));
7120 
7121     }
7122 
7123   /* If we reach here, we want to return a pair of shifts.  The inner
7124      shift is a left shift of BITSIZE - POS - LEN bits.  The outer
7125      shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
7126      logical depending on the value of UNSIGNEDP.
7127 
7128      If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7129      converted into an AND of a shift.
7130 
7131      We must check for the case where the left shift would have a negative
7132      count.  This can happen in a case like (x >> 31) & 255 on machines
7133      that can't shift by a constant.  On those machines, we would first
7134      combine the shift with the AND to produce a variable-position
7135      extraction.  Then the constant of 31 would be substituted in
7136      to produce such a position.  */
7137 
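  /* For example, extracting the low byte of a 32-bit value (LEN == 8,
     POS == 0) yields a left shift by 24 followed by a right shift by 24;
     a sign extraction uses ASHIFTRT for the outer shift, an unsigned one
     uses LSHIFTRT, which then folds into an AND with 255.  */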
7138   modewidth = GET_MODE_PRECISION (GET_MODE (x));
7139   if (modewidth >= pos + len)
7140     {
7141       machine_mode mode = GET_MODE (x);
7142       tem = gen_lowpart (mode, XEXP (x, 0));
7143       if (!tem || GET_CODE (tem) == CLOBBER)
7144 	return x;
7145       tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7146 				  tem, modewidth - pos - len);
7147       tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7148 				  mode, tem, modewidth - len);
7149     }
7150   else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7151     tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
7152 				  simplify_shift_const (NULL_RTX, LSHIFTRT,
7153 							GET_MODE (x),
7154 							XEXP (x, 0), pos),
7155 				  ((unsigned HOST_WIDE_INT) 1 << len) - 1);
7156   else
7157     /* Any other cases we can't handle.  */
7158     return x;
7159 
7160   /* If we couldn't do this for some reason, return the original
7161      expression.  */
7162   if (GET_CODE (tem) == CLOBBER)
7163     return x;
7164 
7165   return tem;
7166 }
7167 
7168 /* X is a SET which contains an assignment of one object into
7169    a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7170    or certain SUBREGS). If possible, convert it into a series of
7171    logical operations.
7172 
7173    We half-heartedly support variable positions, but do not at all
7174    support variable lengths.  */
7175 
7176 static const_rtx
7177 expand_field_assignment (const_rtx x)
7178 {
7179   rtx inner;
7180   rtx pos;			/* Always counts from low bit.  */
7181   int len;
7182   rtx mask, cleared, masked;
7183   machine_mode compute_mode;
7184 
7185   /* Loop until we find something we can't simplify.  */
7186   while (1)
7187     {
7188       if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7189 	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7190 	{
7191 	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7192 	  len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7193 	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7194 	}
7195       else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7196 	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7197 	{
7198 	  inner = XEXP (SET_DEST (x), 0);
7199 	  len = INTVAL (XEXP (SET_DEST (x), 1));
7200 	  pos = XEXP (SET_DEST (x), 2);
7201 
7202 	  /* A constant position should stay within the width of INNER.  */
7203 	  if (CONST_INT_P (pos)
7204 	      && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7205 	    break;
7206 
7207 	  if (BITS_BIG_ENDIAN)
7208 	    {
7209 	      if (CONST_INT_P (pos))
7210 		pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7211 			       - INTVAL (pos));
7212 	      else if (GET_CODE (pos) == MINUS
7213 		       && CONST_INT_P (XEXP (pos, 1))
7214 		       && (INTVAL (XEXP (pos, 1))
7215 			   == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7216 		/* If position is ADJUST - X, new position is X.  */
7217 		pos = XEXP (pos, 0);
7218 	      else
7219 		{
7220 		  HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7221 		  pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7222 					     gen_int_mode (prec - len,
7223 							   GET_MODE (pos)),
7224 					     pos);
7225 		}
7226 	    }
7227 	}
7228 
7229       /* A SUBREG between two modes that occupy the same numbers of words
7230 	 can be done by moving the SUBREG to the source.  */
7231       else if (GET_CODE (SET_DEST (x)) == SUBREG
7232 	       /* We need SUBREGs to compute nonzero_bits properly.  */
7233 	       && nonzero_sign_valid
7234 	       && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7235 		     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7236 		   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7237 			+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7238 	{
7239 	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7240 			   gen_lowpart
7241 			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
7242 			    SET_SRC (x)));
7243 	  continue;
7244 	}
7245       else
7246 	break;
7247 
7248       while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7249 	inner = SUBREG_REG (inner);
7250 
7251       compute_mode = GET_MODE (inner);
7252 
7253       /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
7254       if (! SCALAR_INT_MODE_P (compute_mode))
7255 	{
7256 	  machine_mode imode;
7257 
7258 	  /* Don't do anything for vector or complex integral types.  */
7259 	  if (! FLOAT_MODE_P (compute_mode))
7260 	    break;
7261 
7262 	  /* Try to find an integral mode to pun with.  */
7263 	  imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
7264 	  if (imode == BLKmode)
7265 	    break;
7266 
7267 	  compute_mode = imode;
7268 	  inner = gen_lowpart (imode, inner);
7269 	}
7270 
7271       /* Compute a mask of LEN bits, if we can do this on the host machine.  */
7272       if (len >= HOST_BITS_PER_WIDE_INT)
7273 	break;
7274 
7275       /* Don't try to compute in too wide unsupported modes.  */
7276       if (!targetm.scalar_mode_supported_p (compute_mode))
7277 	break;
7278 
7279       /* Now compute the equivalent expression.  Make a copy of INNER
7280 	 for the SET_DEST in case it is a MEM into which we will substitute;
7281 	 we don't want shared RTL in that case.  */
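      /* The expression built below has the shape
	   (set INNER (ior (and (not (ashift MASK POS)) INNER)
			   (ashift (and SRC MASK) POS)))
	 i.e. clear the LEN-bit field in INNER and OR in the new value from
	 SET_SRC, shifted into position.  */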
7282       mask = gen_int_mode (((unsigned HOST_WIDE_INT) 1 << len) - 1,
7283 			   compute_mode);
7284       cleared = simplify_gen_binary (AND, compute_mode,
7285 				     simplify_gen_unary (NOT, compute_mode,
7286 				       simplify_gen_binary (ASHIFT,
7287 							    compute_mode,
7288 							    mask, pos),
7289 				       compute_mode),
7290 				     inner);
7291       masked = simplify_gen_binary (ASHIFT, compute_mode,
7292 				    simplify_gen_binary (
7293 				      AND, compute_mode,
7294 				      gen_lowpart (compute_mode, SET_SRC (x)),
7295 				      mask),
7296 				    pos);
7297 
7298       x = gen_rtx_SET (copy_rtx (inner),
7299 		       simplify_gen_binary (IOR, compute_mode,
7300 					    cleared, masked));
7301     }
7302 
7303   return x;
7304 }
7305 
7306 /* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
7307    it is an RTX that represents the (variable) starting position; otherwise,
7308    POS is the (constant) starting bit position.  Both are counted from the LSB.
7309 
7310    UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7311 
7312    IN_DEST is nonzero if this is a reference in the destination of a SET.
7313    This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
7314    a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7315    be used.
7316 
7317    IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
7318    ZERO_EXTRACT should be built even for bits starting at bit 0.
7319 
7320    MODE is the desired mode of the result (if IN_DEST == 0).
7321 
7322    The result is an RTX for the extraction or NULL_RTX if the target
7323    can't handle it.  */
7324 
7325 static rtx
7326 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7327 		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7328 		 int in_dest, int in_compare)
7329 {
7330   /* This mode describes the size of the storage area
7331      to fetch the overall value from.  Within that, we
7332      ignore the POS lowest bits, etc.  */
7333   machine_mode is_mode = GET_MODE (inner);
7334   machine_mode inner_mode;
7335   machine_mode wanted_inner_mode;
7336   machine_mode wanted_inner_reg_mode = word_mode;
7337   machine_mode pos_mode = word_mode;
7338   machine_mode extraction_mode = word_mode;
7339   machine_mode tmode = mode_for_size (len, MODE_INT, 1);
7340   rtx new_rtx = 0;
7341   rtx orig_pos_rtx = pos_rtx;
7342   HOST_WIDE_INT orig_pos;
7343 
7344   if (pos_rtx && CONST_INT_P (pos_rtx))
7345     pos = INTVAL (pos_rtx), pos_rtx = 0;
7346 
7347   if (GET_CODE (inner) == SUBREG
7348       && subreg_lowpart_p (inner)
7349       && (paradoxical_subreg_p (inner)
7350 	  /* If trying or potentially trying to extract
7351 	     bits outside of is_mode, don't look through
7352 	     non-paradoxical SUBREGs.  See PR82192.  */
7353 	  || (pos_rtx == NULL_RTX
7354 	      && pos + len <= GET_MODE_PRECISION (is_mode))))
7355     {
7356       /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7357 	 consider just the QI as the memory to extract from.
7358 	 The subreg adds or removes high bits; its mode is
7359 	 irrelevant to the meaning of this extraction,
7360 	 since POS and LEN count from the lsb.  */
7361       if (MEM_P (SUBREG_REG (inner)))
7362 	is_mode = GET_MODE (SUBREG_REG (inner));
7363       inner = SUBREG_REG (inner);
7364     }
7365   else if (GET_CODE (inner) == ASHIFT
7366 	   && CONST_INT_P (XEXP (inner, 1))
7367 	   && pos_rtx == 0 && pos == 0
7368 	   && len > UINTVAL (XEXP (inner, 1)))
7369     {
7370       /* We're extracting the least significant bits of an rtx
7371 	 (ashift X (const_int C)), where LEN > C.  Extract the
7372 	 least significant (LEN - C) bits of X, giving an rtx
7373 	 whose mode is MODE, then shift it left C times.  */
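      /* E.g. extracting the low 8 bits of (ashift X (const_int 3)) is the
	 same as extracting the low 5 bits of X and shifting the result left
	 by 3, since the low 3 bits of the shifted value are known zero.  */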
7374       new_rtx = make_extraction (mode, XEXP (inner, 0),
7375 			     0, 0, len - INTVAL (XEXP (inner, 1)),
7376 			     unsignedp, in_dest, in_compare);
7377       if (new_rtx != 0)
7378 	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7379     }
7380   else if (GET_CODE (inner) == TRUNCATE
7381 	   /* If trying or potentially trying to extract
7382 	      bits outside of is_mode, don't look through
7383 	      TRUNCATE.  See PR82192.  */
7384 	   && pos_rtx == NULL_RTX
7385 	   && pos + len <= GET_MODE_PRECISION (is_mode))
7386     inner = XEXP (inner, 0);
7387 
7388   inner_mode = GET_MODE (inner);
7389 
7390   /* See if this can be done without an extraction.  We never can if the
7391      width of the field is not the same as that of some integer mode. For
7392      registers, we can only avoid the extraction if the position is at the
7393      low-order bit and this is either not in the destination or we have the
7394      appropriate STRICT_LOW_PART operation available.
7395 
7396      For MEM, we can avoid an extract if the field starts on an appropriate
7397      boundary and we can change the mode of the memory reference.  */
7398 
7399   if (tmode != BLKmode
7400       && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7401 	   && !MEM_P (inner)
7402 	   && (pos == 0 || REG_P (inner))
7403 	   && (inner_mode == tmode
7404 	       || !REG_P (inner)
7405 	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7406 	       || reg_truncated_to_mode (tmode, inner))
7407 	   && (! in_dest
7408 	       || (REG_P (inner)
7409 		   && have_insn_for (STRICT_LOW_PART, tmode))))
7410 	  || (MEM_P (inner) && pos_rtx == 0
7411 	      && (pos
7412 		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7413 		     : BITS_PER_UNIT)) == 0
7414 	      /* We can't do this if we are widening INNER_MODE (it
7415 		 may not be aligned, for one thing).  */
7416 	      && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7417 	      && (inner_mode == tmode
7418 		  || (! mode_dependent_address_p (XEXP (inner, 0),
7419 						  MEM_ADDR_SPACE (inner))
7420 		      && ! MEM_VOLATILE_P (inner))))))
7421     {
7422       /* If INNER is a MEM, make a new MEM that encompasses just the desired
7423 	 field.  If the original and current mode are the same, we need not
7424 	 adjust the offset.  Otherwise, we do if the bytes are big endian.
7425 
7426 	 If INNER is not a MEM, get a piece consisting of just the field
7427 	 of interest (in this case POS % BITS_PER_WORD must be 0).  */
7428 
7429       if (MEM_P (inner))
7430 	{
7431 	  HOST_WIDE_INT offset;
7432 
7433 	  /* POS counts from lsb, but make OFFSET count in memory order.  */
7434 	  if (BYTES_BIG_ENDIAN)
7435 	    offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7436 	  else
7437 	    offset = pos / BITS_PER_UNIT;
7438 
7439 	  new_rtx = adjust_address_nv (inner, tmode, offset);
7440 	}
7441       else if (REG_P (inner))
7442 	{
7443 	  if (tmode != inner_mode)
7444 	    {
7445 	      /* We can't call gen_lowpart in a DEST since we
7446 		 always want a SUBREG (see below) and it would sometimes
7447 		 return a new hard register.  */
7448 	      if (pos || in_dest)
7449 		{
7450 		  HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7451 
7452 		  if (WORDS_BIG_ENDIAN
7453 		      && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7454 		    final_word = ((GET_MODE_SIZE (inner_mode)
7455 				   - GET_MODE_SIZE (tmode))
7456 				  / UNITS_PER_WORD) - final_word;
7457 
7458 		  final_word *= UNITS_PER_WORD;
7459 		  if (BYTES_BIG_ENDIAN
7460 		      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7461 		    final_word += (GET_MODE_SIZE (inner_mode)
7462 				   - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7463 
7464 		  /* Avoid creating invalid subregs, for example when
7465 		     simplifying (x>>32)&255.  */
7466 		  if (!validate_subreg (tmode, inner_mode, inner, final_word))
7467 		    return NULL_RTX;
7468 
7469 		  new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7470 		}
7471 	      else
7472 		new_rtx = gen_lowpart (tmode, inner);
7473 	    }
7474 	  else
7475 	    new_rtx = inner;
7476 	}
7477       else
7478 	new_rtx = force_to_mode (inner, tmode,
7479 				 len >= HOST_BITS_PER_WIDE_INT
7480 				 ? ~(unsigned HOST_WIDE_INT) 0
7481 				 : ((unsigned HOST_WIDE_INT) 1 << len) - 1, 0);
7482 
7483       /* If this extraction is going into the destination of a SET,
7484 	 make a STRICT_LOW_PART unless we made a MEM.  */
7485 
7486       if (in_dest)
7487 	return (MEM_P (new_rtx) ? new_rtx
7488 		: (GET_CODE (new_rtx) != SUBREG
7489 		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
7490 		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7491 
7492       if (mode == tmode)
7493 	return new_rtx;
7494 
7495       if (CONST_SCALAR_INT_P (new_rtx))
7496 	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7497 					 mode, new_rtx, tmode);
7498 
7499       /* If we know that no extraneous bits are set, and that the high
7500 	 bit is not set, convert the extraction to the cheaper of
7501 	 sign and zero extension, which are equivalent in these cases.  */
7502       if (flag_expensive_optimizations
7503 	  && (HWI_COMPUTABLE_MODE_P (tmode)
7504 	      && ((nonzero_bits (new_rtx, tmode)
7505 		   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7506 		  == 0)))
7507 	{
7508 	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7509 	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7510 
7511 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7512 	     backends.  */
7513 	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7514 	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7515 	    return temp;
7516 	  return temp1;
7517 	}
7518 
7519       /* Otherwise, sign- or zero-extend unless we already are in the
7520 	 proper mode.  */
7521 
7522       return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7523 			     mode, new_rtx));
7524     }
7525 
7526   /* Unless this is a COMPARE or we have a funny memory reference,
7527      don't do anything with zero-extending field extracts starting at
7528      the low-order bit since they are simple AND operations.  */
7529   if (pos_rtx == 0 && pos == 0 && ! in_dest
7530       && ! in_compare && unsignedp)
7531     return 0;
7532 
7533   /* If INNER is a MEM, reject this if we would be spanning bytes or
7534      if the position is not a constant and the length is not 1.  In all
7535      other cases, we would only be going outside our object in cases when
7536      an original shift would have been undefined.  */
7537   if (MEM_P (inner)
7538       && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7539 	  || (pos_rtx != 0 && len != 1)))
7540     return 0;
7541 
7542   enum extraction_pattern pattern = (in_dest ? EP_insv
7543 				     : unsignedp ? EP_extzv : EP_extv);
7544 
7545   /* If INNER is not from memory, we want it to have the mode of a register
7546      extraction pattern's structure operand, or word_mode if there is no
7547      such pattern.  The same applies to extraction_mode and pos_mode
7548      and their respective operands.
7549 
7550      For memory, assume that the desired extraction_mode and pos_mode
7551      are the same as for a register operation, since at present we don't
7552      have named patterns for aligned memory structures.  */
7553   struct extraction_insn insn;
7554   if (get_best_reg_extraction_insn (&insn, pattern,
7555 				    GET_MODE_BITSIZE (inner_mode), mode))
7556     {
7557       wanted_inner_reg_mode = insn.struct_mode;
7558       pos_mode = insn.pos_mode;
7559       extraction_mode = insn.field_mode;
7560     }
7561 
7562   /* Never narrow an object, since that might not be safe.  */
7563 
7564   if (mode != VOIDmode
7565       && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7566     extraction_mode = mode;
7567 
7568   if (!MEM_P (inner))
7569     wanted_inner_mode = wanted_inner_reg_mode;
7570   else
7571     {
7572       /* Be careful not to go beyond the extracted object and maintain the
7573 	 natural alignment of the memory.  */
7574       wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7575       while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7576 	     > GET_MODE_BITSIZE (wanted_inner_mode))
7577 	{
7578 	  wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7579 	  gcc_assert (wanted_inner_mode != VOIDmode);
7580 	}
7581     }
7582 
7583   orig_pos = pos;
7584 
7585   if (BITS_BIG_ENDIAN)
7586     {
7587       /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7588 	 BITS_BIG_ENDIAN style.  If position is constant, compute new
7589 	 position.  Otherwise, build subtraction.
7590 	 Note that POS is relative to the mode of the original argument.
7591 	 If it's a MEM we need to recompute POS relative to that.
7592 	 However, if we're extracting from (or inserting into) a register,
7593 	 we want to recompute POS relative to wanted_inner_mode.  */
7594       int width = (MEM_P (inner)
7595 		   ? GET_MODE_BITSIZE (is_mode)
7596 		   : GET_MODE_BITSIZE (wanted_inner_mode));
7597 
7598       if (pos_rtx == 0)
7599 	pos = width - len - pos;
7600       else
7601 	pos_rtx
7602 	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
7603 			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
7604 			   pos_rtx);
7605       /* POS may be less than 0 now, but we check for that below.
7606 	 Note that it can only be less than 0 if !MEM_P (inner).  */
7607     }
7608 
7609   /* If INNER has a wider mode, and this is a constant extraction, try to
7610      make it smaller and adjust the byte to point to the byte containing
7611      the value.  */
7612   if (wanted_inner_mode != VOIDmode
7613       && inner_mode != wanted_inner_mode
7614       && ! pos_rtx
7615       && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7616       && MEM_P (inner)
7617       && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7618       && ! MEM_VOLATILE_P (inner))
7619     {
7620       int offset = 0;
7621 
7622       /* The computations below will be correct if the machine is big
7623 	 endian in both bits and bytes or little endian in bits and bytes.
7624 	 If it is mixed, we must adjust.  */
7625 
7626       /* If bytes are big endian and we had a paradoxical SUBREG, we must
7627 	 adjust OFFSET to compensate.  */
7628       if (BYTES_BIG_ENDIAN
7629 	  && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7630 	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7631 
7632       /* We can now move to the desired byte.  */
7633       offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7634 		* GET_MODE_SIZE (wanted_inner_mode);
7635       pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7636 
7637       if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7638 	  && is_mode != wanted_inner_mode)
7639 	offset = (GET_MODE_SIZE (is_mode)
7640 		  - GET_MODE_SIZE (wanted_inner_mode) - offset);
7641 
7642       inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7643     }
7644 
7645   /* If INNER is not memory, get it into the proper mode.  If we are changing
7646      its mode, POS must be a constant and smaller than the size of the new
7647      mode.  */
7648   else if (!MEM_P (inner))
7649     {
7650       /* On the LHS, don't create paradoxical subregs implicitly truncating
7651 	 the register unless TRULY_NOOP_TRUNCATION.  */
7652       if (in_dest
7653 	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7654 					     wanted_inner_mode))
7655 	return NULL_RTX;
7656 
7657       if (GET_MODE (inner) != wanted_inner_mode
7658 	  && (pos_rtx != 0
7659 	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7660 	return NULL_RTX;
7661 
7662       if (orig_pos < 0)
7663 	return NULL_RTX;
7664 
7665       inner = force_to_mode (inner, wanted_inner_mode,
7666 			     pos_rtx
7667 			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7668 			     ? ~(unsigned HOST_WIDE_INT) 0
7669 			     : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7670 				<< orig_pos),
7671 			     0);
7672     }
7673 
7674   /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
7675      have to zero extend.  Otherwise, we can just use a SUBREG.  */
7676   if (pos_rtx != 0
7677       && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7678     {
7679       rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7680 				     GET_MODE (pos_rtx));
7681 
7682       /* If we know that no extraneous bits are set, and that the high
7683 	 bit is not set, convert the extraction to the cheaper one - either
7684 	 SIGN_EXTENSION or ZERO_EXTENSION, which are equivalent in these
7685 	 cases.  */
7686       if (flag_expensive_optimizations
7687 	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7688 	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7689 		   & ~(((unsigned HOST_WIDE_INT)
7690 			GET_MODE_MASK (GET_MODE (pos_rtx)))
7691 		       >> 1))
7692 		  == 0)))
7693 	{
7694 	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7695 					  GET_MODE (pos_rtx));
7696 
7697 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7698 	     backends.  */
7699 	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7700 	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7701 	    temp = temp1;
7702 	}
7703       pos_rtx = temp;
7704     }
7705 
7706   /* Make POS_RTX unless we already have it and it is correct.  If we don't
7707      have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7708      be a CONST_INT.  */
7709   if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7710     pos_rtx = orig_pos_rtx;
7711 
7712   else if (pos_rtx == 0)
7713     pos_rtx = GEN_INT (pos);
7714 
7715   /* Make the required operation.  See if we can reuse an existing rtx.  */
7716   new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7717 			 extraction_mode, inner, GEN_INT (len), pos_rtx);
7718   if (! in_dest)
7719     new_rtx = gen_lowpart (mode, new_rtx);
7720 
7721   return new_rtx;
7722 }
7723 
7724 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7725    with any other operations in X.  Return X without that shift if so.  */
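/* For example, with COUNT == 2, (plus (ashift X (const_int 2)) (const_int 12))
   yields (plus X (const_int 3)); shifting that result left by 2 recreates the
   original value.  */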
7726 
7727 static rtx
7728 extract_left_shift (rtx x, int count)
7729 {
7730   enum rtx_code code = GET_CODE (x);
7731   machine_mode mode = GET_MODE (x);
7732   rtx tem;
7733 
7734   switch (code)
7735     {
7736     case ASHIFT:
7737       /* This is the shift itself.  If it is wide enough, we will return
7738 	 either the value being shifted if the shift count is equal to
7739 	 COUNT or a shift for the difference.  */
7740       if (CONST_INT_P (XEXP (x, 1))
7741 	  && INTVAL (XEXP (x, 1)) >= count)
7742 	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7743 				     INTVAL (XEXP (x, 1)) - count);
7744       break;
7745 
7746     case NEG:  case NOT:
7747       if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7748 	return simplify_gen_unary (code, mode, tem, mode);
7749 
7750       break;
7751 
7752     case PLUS:  case IOR:  case XOR:  case AND:
7753       /* If we can safely shift this constant and we find the inner shift,
7754 	 make a new operation.  */
7755       if (CONST_INT_P (XEXP (x, 1))
7756 	  && (UINTVAL (XEXP (x, 1))
7757 	      & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7758 	  && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7759 	{
7760 	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7761 	  return simplify_gen_binary (code, mode, tem,
7762 				      gen_int_mode (val, mode));
7763 	}
7764       break;
7765 
7766     default:
7767       break;
7768     }
7769 
7770   return 0;
7771 }
7772 
7773 /* Look at the expression rooted at X.  Look for expressions
7774    equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7775    Form these expressions.
7776 
7777    Return the new rtx, usually just X.
7778 
7779    Also, for machines like the VAX that don't have logical shift insns,
7780    try to convert logical to arithmetic shift operations in cases where
7781    they are equivalent.  This undoes the canonicalizations to logical
7782    shifts done elsewhere.
7783 
7784    We try, as much as possible, to re-use rtl expressions to save memory.
7785 
7786    IN_CODE says what kind of expression we are processing.  Normally, it is
7787    SET.  In a memory address it is MEM.  When processing the arguments of
7788    a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
7789    precisely it is an equality comparison against zero.  */
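/* For example, on a typical 32-bit target,
   (and:SI (lshiftrt:SI X (const_int 3)) (const_int 255)) is rewritten here as
   (zero_extract:SI X (const_int 8) (const_int 3)).  */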
7790 
7791 rtx
7792 make_compound_operation (rtx x, enum rtx_code in_code)
7793 {
7794   enum rtx_code code = GET_CODE (x);
7795   machine_mode mode = GET_MODE (x);
7796   int mode_width = GET_MODE_PRECISION (mode);
7797   rtx rhs, lhs;
7798   enum rtx_code next_code;
7799   int i, j;
7800   rtx new_rtx = 0;
7801   rtx tem;
7802   const char *fmt;
7803   bool equality_comparison = false;
7804 
7805   /* Select the code to be used in recursive calls.  Once we are inside an
7806      address, we stay there.  If we have a comparison, set to COMPARE,
7807      but once inside, go back to our default of SET.  */
7808 
7809   if (in_code == EQ)
7810     {
7811       equality_comparison = true;
7812       in_code = COMPARE;
7813     }
7814   next_code = (code == MEM ? MEM
7815 	       : ((code == COMPARE || COMPARISON_P (x))
7816 		  && XEXP (x, 1) == const0_rtx) ? COMPARE
7817 	       : in_code == COMPARE ? SET : in_code);
7818 
7819   /* Process depending on the code of this operation.  If NEW_RTX is set
7820      nonzero, it will be returned.  */
7821 
7822   switch (code)
7823     {
7824     case ASHIFT:
7825       /* Convert shifts by constants into multiplications if inside
7826 	 an address.  */
7827       if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
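      /* For example, inside a MEM address, (ashift:SI X (const_int 2))
	 becomes (mult:SI X (const_int 4)).  */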
7828 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7829 	  && INTVAL (XEXP (x, 1)) >= 0
7830 	  && SCALAR_INT_MODE_P (mode))
7831 	{
7832 	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7833 	  HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7834 
7835 	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7836 	  if (GET_CODE (new_rtx) == NEG)
7837 	    {
7838 	      new_rtx = XEXP (new_rtx, 0);
7839 	      multval = -multval;
7840 	    }
7841 	  multval = trunc_int_for_mode (multval, mode);
7842 	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7843 	}
7844       break;
7845 
7846     case PLUS:
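      /* (plus (mult (neg A) B) C) is rewritten here as (minus C (mult A B)),
	 and likewise when the multiplier is a negative constant.  */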
7847       lhs = XEXP (x, 0);
7848       rhs = XEXP (x, 1);
7849       lhs = make_compound_operation (lhs, next_code);
7850       rhs = make_compound_operation (rhs, next_code);
7851       if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7852 	  && SCALAR_INT_MODE_P (mode))
7853 	{
7854 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7855 				     XEXP (lhs, 1));
7856 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7857 	}
7858       else if (GET_CODE (lhs) == MULT
7859 	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7860 	{
7861 	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7862 				     simplify_gen_unary (NEG, mode,
7863 							 XEXP (lhs, 1),
7864 							 mode));
7865 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7866 	}
7867       else
7868 	{
7869 	  SUBST (XEXP (x, 0), lhs);
7870 	  SUBST (XEXP (x, 1), rhs);
7871 	  goto maybe_swap;
7872 	}
7873       x = gen_lowpart (mode, new_rtx);
7874       goto maybe_swap;
7875 
7876     case MINUS:
7877       lhs = XEXP (x, 0);
7878       rhs = XEXP (x, 1);
7879       lhs = make_compound_operation (lhs, next_code);
7880       rhs = make_compound_operation (rhs, next_code);
7881       if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7882 	  && SCALAR_INT_MODE_P (mode))
7883 	{
7884 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7885 				     XEXP (rhs, 1));
7886 	  new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7887 	}
7888       else if (GET_CODE (rhs) == MULT
7889 	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7890 	{
7891 	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7892 				     simplify_gen_unary (NEG, mode,
7893 							 XEXP (rhs, 1),
7894 							 mode));
7895 	  new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7896 	}
7897       else
7898 	{
7899 	  SUBST (XEXP (x, 0), lhs);
7900 	  SUBST (XEXP (x, 1), rhs);
7901 	  return x;
7902 	}
7903       return gen_lowpart (mode, new_rtx);
7904 
7905     case AND:
7906       /* If the second operand is not a constant, we can't do anything
7907 	 with it.  */
7908       if (!CONST_INT_P (XEXP (x, 1)))
7909 	break;
7910 
7911       /* If the constant is a power of two minus one and the first operand
7912 	 is a logical right shift, make an extraction.  */
7913       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7914 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7915 	{
7916 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7917 	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7918 				 0, in_code == COMPARE);
7919 	}
7920 
7921       /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
7922       else if (GET_CODE (XEXP (x, 0)) == SUBREG
7923 	       && subreg_lowpart_p (XEXP (x, 0))
7924 	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7925 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7926 	{
7927 	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
7928 	  machine_mode inner_mode = GET_MODE (inner_x0);
7929 	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
7930 	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
7931 				     XEXP (inner_x0, 1),
7932 				     i, 1, 0, in_code == COMPARE);
7933 
7934 	  if (new_rtx)
7935 	    {
7936 	      /* If we narrowed the mode when dropping the subreg, then
7937 		 we must zero-extend to keep the semantics of the AND.  */
7938 	      if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
7939 		;
7940 	      else if (SCALAR_INT_MODE_P (inner_mode))
7941 		new_rtx = simplify_gen_unary (ZERO_EXTEND, mode,
7942 					      new_rtx, inner_mode);
7943 	      else
7944 		new_rtx = NULL;
7945 	    }
7946 
7947 	  /* If that didn't give anything, see if the AND simplifies on
7948 	     its own.  */
7949 	  if (!new_rtx && i >= 0)
7950 	    {
7951 	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7952 	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
7953 					 0, in_code == COMPARE);
7954 	    }
7955 	}
7956       /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
7957       else if ((GET_CODE (XEXP (x, 0)) == XOR
7958 		|| GET_CODE (XEXP (x, 0)) == IOR)
7959 	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7960 	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7961 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7962 	{
7963 	  /* Apply the distributive law, and then try to make extractions.  */
7964 	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7965 				gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7966 					     XEXP (x, 1)),
7967 				gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7968 					     XEXP (x, 1)));
7969 	  new_rtx = make_compound_operation (new_rtx, in_code);
7970 	}
7971 
7972       /* If we have (and (rotate X C) M) and C is larger than the number
7973 	 of bits in M, this is an extraction.  */
7974 
7975       else if (GET_CODE (XEXP (x, 0)) == ROTATE
7976 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7977 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7978 	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7979 	{
7980 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7981 	  new_rtx = make_extraction (mode, new_rtx,
7982 				 (GET_MODE_PRECISION (mode)
7983 				  - INTVAL (XEXP (XEXP (x, 0), 1))),
7984 				 NULL_RTX, i, 1, 0, in_code == COMPARE);
7985 	}
7986 
7987       /* On machines without logical shifts, if the operand of the AND is
7988 	 a logical shift and our mask turns off all the propagated sign
7989 	 bits, we can replace the logical shift with an arithmetic shift.  */
7990       else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7991 	       && !have_insn_for (LSHIFTRT, mode)
7992 	       && have_insn_for (ASHIFTRT, mode)
7993 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7994 	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7995 	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7996 	       && mode_width <= HOST_BITS_PER_WIDE_INT)
7997 	{
7998 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7999 
8000 	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8001 	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8002 	    SUBST (XEXP (x, 0),
8003 		   gen_rtx_ASHIFTRT (mode,
8004 				     make_compound_operation
8005 				     (XEXP (XEXP (x, 0), 0), next_code),
8006 				     XEXP (XEXP (x, 0), 1)));
8007 	}
8008 
8009       /* If the constant is one less than a power of two, this might be
8010 	 representable by an extraction even if no shift is present.
8011 	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8012 	 we are in a COMPARE.  */
8013       else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8014 	new_rtx = make_extraction (mode,
8015 			       make_compound_operation (XEXP (x, 0),
8016 							next_code),
8017 			       0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8018 
8019       /* If we are in a comparison and this is an AND with a power of two,
8020 	 convert this into the appropriate bit extract.  */
8021       else if (in_code == COMPARE
8022 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8023 	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8024 	new_rtx = make_extraction (mode,
8025 				   make_compound_operation (XEXP (x, 0),
8026 							    next_code),
8027 				   i, NULL_RTX, 1, 1, 0, 1);
8028 
8029       break;
8030 
8031     case LSHIFTRT:
8032       /* If the sign bit is known to be zero, replace this with an
8033 	 arithmetic shift.  */
8034       if (have_insn_for (ASHIFTRT, mode)
8035 	  && ! have_insn_for (LSHIFTRT, mode)
8036 	  && mode_width <= HOST_BITS_PER_WIDE_INT
8037 	  && (nonzero_bits (XEXP (x, 0), mode) & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0)
8038 	{
8039 	  new_rtx = gen_rtx_ASHIFTRT (mode,
8040 				  make_compound_operation (XEXP (x, 0),
8041 							   next_code),
8042 				  XEXP (x, 1));
8043 	  break;
8044 	}
8045 
8046       /* ... fall through ...  */
8047 
8048     case ASHIFTRT:
8049       lhs = XEXP (x, 0);
8050       rhs = XEXP (x, 1);
8051 
8052       /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8053 	 this is a SIGN_EXTRACT.  */
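      /* For instance, (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
	 extracts the low 8 bits of X with sign extension.  */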
8054       if (CONST_INT_P (rhs)
8055 	  && GET_CODE (lhs) == ASHIFT
8056 	  && CONST_INT_P (XEXP (lhs, 1))
8057 	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8058 	  && INTVAL (XEXP (lhs, 1)) >= 0
8059 	  && INTVAL (rhs) < mode_width)
8060 	{
8061 	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8062 	  new_rtx = make_extraction (mode, new_rtx,
8063 				 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8064 				 NULL_RTX, mode_width - INTVAL (rhs),
8065 				 code == LSHIFTRT, 0, in_code == COMPARE);
8066 	  break;
8067 	}
8068 
8069       /* See if we have operations between an ASHIFTRT and an ASHIFT.
8070 	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
8071 	 also do this for some cases of SIGN_EXTRACT, but it doesn't
8072 	 seem worth the effort; the case checked for occurs on Alpha.  */
8073 
8074       if (!OBJECT_P (lhs)
8075 	  && ! (GET_CODE (lhs) == SUBREG
8076 		&& (OBJECT_P (SUBREG_REG (lhs))))
8077 	  && CONST_INT_P (rhs)
8078 	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8079 	  && INTVAL (rhs) < mode_width
8080 	  && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
8081 	new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
8082 			       0, NULL_RTX, mode_width - INTVAL (rhs),
8083 			       code == LSHIFTRT, 0, in_code == COMPARE);
8084 
8085       break;
8086 
8087     case SUBREG:
8088       /* Call ourselves recursively on the inner expression.  If we are
8089 	 narrowing the object and it has a different RTL code from
8090 	 what it originally did, do this SUBREG as a force_to_mode.  */
8091       {
8092 	rtx inner = SUBREG_REG (x), simplified;
8093 	enum rtx_code subreg_code = in_code;
8094 
8095 	/* If in_code is COMPARE, it isn't always safe to pass it through
8096 	   to the recursive make_compound_operation call.  */
8097 	if (subreg_code == COMPARE
8098 	    && (!subreg_lowpart_p (x)
8099 		|| GET_CODE (inner) == SUBREG
8100 		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8101 		   is (const_int 0), rather than
8102 		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8103 		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8104 		   for non-equality comparisons against 0 is not equivalent
8105 		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
8106 		|| (GET_CODE (inner) == AND
8107 		    && CONST_INT_P (XEXP (inner, 1))
8108 		    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8109 		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
8110 		       >= GET_MODE_BITSIZE (mode) - 1)))
8111 	  subreg_code = SET;
8112 
8113 	tem = make_compound_operation (inner, subreg_code);
8114 
8115 	simplified
8116 	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8117 	if (simplified)
8118 	  tem = simplified;
8119 
8120 	if (GET_CODE (tem) != GET_CODE (inner)
8121 	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8122 	    && subreg_lowpart_p (x))
8123 	  {
8124 	    rtx newer
8125 	      = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
8126 
8127 	    /* If we have something other than a SUBREG, we might have
8128 	       done an expansion, so rerun ourselves.  */
8129 	    if (GET_CODE (newer) != SUBREG)
8130 	      newer = make_compound_operation (newer, in_code);
8131 
8132 	    /* force_to_mode can expand compounds.  If it just re-expanded the
8133 	       compound, use gen_lowpart to convert to the desired mode.  */
8134 	    if (rtx_equal_p (newer, x)
8135 		/* Likewise if it re-expanded the compound only partially.
8136 		   This happens for SUBREG of ZERO_EXTRACT if they extract
8137 		   the same number of bits.  */
8138 		|| (GET_CODE (newer) == SUBREG
8139 		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8140 			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8141 		    && GET_CODE (inner) == AND
8142 		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8143 	      return gen_lowpart (GET_MODE (x), tem);
8144 
8145 	    return newer;
8146 	  }
8147 
8148 	if (simplified)
8149 	  return tem;
8150       }
8151       break;
8152 
8153     default:
8154       break;
8155     }
8156 
8157   if (new_rtx)
8158     {
8159       x = gen_lowpart (mode, new_rtx);
8160       code = GET_CODE (x);
8161     }
8162 
8163   /* Now recursively process each operand of this operation.  We need to
8164      handle ZERO_EXTEND specially so that we don't lose track of the
8165      inner mode.  */
8166   if (GET_CODE (x) == ZERO_EXTEND)
8167     {
8168       new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8169       tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8170 					    new_rtx, GET_MODE (XEXP (x, 0)));
8171       if (tem)
8172 	return tem;
8173       SUBST (XEXP (x, 0), new_rtx);
8174       return x;
8175     }
8176 
8177   fmt = GET_RTX_FORMAT (code);
8178   for (i = 0; i < GET_RTX_LENGTH (code); i++)
8179     if (fmt[i] == 'e')
8180       {
8181 	new_rtx = make_compound_operation (XEXP (x, i), next_code);
8182 	SUBST (XEXP (x, i), new_rtx);
8183       }
8184     else if (fmt[i] == 'E')
8185       for (j = 0; j < XVECLEN (x, i); j++)
8186 	{
8187 	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8188 	  SUBST (XVECEXP (x, i, j), new_rtx);
8189 	}
8190 
8191  maybe_swap:
8192   /* If this is a commutative operation, the changes to the operands
8193      may have made it noncanonical.  */
8194   if (COMMUTATIVE_ARITH_P (x)
8195       && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
8196     {
8197       tem = XEXP (x, 0);
8198       SUBST (XEXP (x, 0), XEXP (x, 1));
8199       SUBST (XEXP (x, 1), tem);
8200     }
8201 
8202   return x;
8203 }
8204 
8205 /* Given M see if it is a value that would select a field of bits
8206 /* Given M, see if it is a value that would select a field of bits
8207    Otherwise, return the starting position of the field, where 0 is the
8208    low-order bit.
8209 
8210    *PLEN is set to the length of the field.  */
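/* For example, M == 0x78 selects a 4-bit field starting at bit 3, so the
   return value is 3 and *PLEN is set to 4.  */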
8211 
8212 static int
8213 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8214 {
8215   /* Get the bit number of the first 1 bit from the right, -1 if none.  */
8216   int pos = m ? ctz_hwi (m) : -1;
8217   int len = 0;
8218 
8219   if (pos >= 0)
8220     /* Now shift off the low-order zero bits and see if we have a
8221        power of two minus 1.  */
8222     len = exact_log2 ((m >> pos) + 1);
8223 
8224   if (len <= 0)
8225     pos = -1;
8226 
8227   *plen = len;
8228   return pos;
8229 }
8230 
8231 /* If X refers to a register that equals REG in value, replace these
8232    references with REG.  */
8233 static rtx
8234 canon_reg_for_combine (rtx x, rtx reg)
8235 {
8236   rtx op0, op1, op2;
8237   const char *fmt;
8238   int i;
8239   bool copied;
8240 
8241   enum rtx_code code = GET_CODE (x);
8242   switch (GET_RTX_CLASS (code))
8243     {
8244     case RTX_UNARY:
8245       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8246       if (op0 != XEXP (x, 0))
8247 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8248 				   GET_MODE (reg));
8249       break;
8250 
8251     case RTX_BIN_ARITH:
8252     case RTX_COMM_ARITH:
8253       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8254       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8255       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8256 	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8257       break;
8258 
8259     case RTX_COMPARE:
8260     case RTX_COMM_COMPARE:
8261       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8262       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8263       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8264 	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8265 					GET_MODE (op0), op0, op1);
8266       break;
8267 
8268     case RTX_TERNARY:
8269     case RTX_BITFIELD_OPS:
8270       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8271       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8272       op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8273       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8274 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8275 				     GET_MODE (op0), op0, op1, op2);
8276 
8277     case RTX_OBJ:
8278       if (REG_P (x))
8279 	{
8280 	  if (rtx_equal_p (get_last_value (reg), x)
8281 	      || rtx_equal_p (reg, get_last_value (x)))
8282 	    return reg;
8283 	  else
8284 	    break;
8285 	}
8286 
8287       /* fall through */
8288 
8289     default:
8290       fmt = GET_RTX_FORMAT (code);
8291       copied = false;
8292       for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8293 	if (fmt[i] == 'e')
8294 	  {
8295 	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8296 	    if (op != XEXP (x, i))
8297 	      {
8298 		if (!copied)
8299 		  {
8300 		    copied = true;
8301 		    x = copy_rtx (x);
8302 		  }
8303 		XEXP (x, i) = op;
8304 	      }
8305 	  }
8306 	else if (fmt[i] == 'E')
8307 	  {
8308 	    int j;
8309 	    for (j = 0; j < XVECLEN (x, i); j++)
8310 	      {
8311 		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8312 		if (op != XVECEXP (x, i, j))
8313 		  {
8314 		    if (!copied)
8315 		      {
8316 			copied = true;
8317 			x = copy_rtx (x);
8318 		      }
8319 		    XVECEXP (x, i, j) = op;
8320 		  }
8321 	      }
8322 	  }
8323 
8324       break;
8325     }
8326 
8327   return x;
8328 }
8329 
8330 /* Return X converted to MODE.  If the value is already truncated to
8331    MODE we can just return a subreg even though in the general case we
8332    would need an explicit truncation.  */
8333 
8334 static rtx
8335 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8336 {
8337   if (!CONST_INT_P (x)
8338       && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
8339       && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8340       && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8341     {
8342       /* Bit-cast X into an integer mode.  */
8343       if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8344 	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
8345       x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
8346 			      x, GET_MODE (x));
8347     }
8348 
8349   return gen_lowpart (mode, x);
8350 }
8351 
8352 /* See if X can be simplified knowing that we will only refer to it in
8353    MODE and will only refer to those bits that are nonzero in MASK.
8354    If other bits are being computed or if masking operations are done
8355    that select a superset of the bits in MASK, they can sometimes be
8356    ignored.
8357 
8358    Return a possibly simplified expression, but always convert X to
8359    MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.
8360 
8361    If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8362    are all off in X.  This is used when X will be complemented, by either
8363    NOT, NEG, or XOR.  */
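/* For example, forcing (and:SI X (const_int 255)) to SImode with MASK == 15
   typically yields just X, since the AND cannot change the four bits that
   remain of interest.  */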
8364 
8365 static rtx
8366 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8367 	       int just_select)
8368 {
8369   enum rtx_code code = GET_CODE (x);
8370   int next_select = just_select || code == XOR || code == NOT || code == NEG;
8371   machine_mode op_mode;
8372   unsigned HOST_WIDE_INT fuller_mask, nonzero;
8373   rtx op0, op1, temp;
8374 
8375   /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
8376      code below will do the wrong thing since the mode of such an
8377      expression is VOIDmode.
8378 
8379      Also do nothing if X is a CLOBBER; this can happen if X was
8380      the return value from a call to gen_lowpart.  */
8381   if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8382     return x;
8383 
8384   /* We want to perform the operation in its present mode unless we know
8385      that the operation is valid in MODE, in which case we do the operation
8386      in MODE.  */
8387   op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8388 	      && have_insn_for (code, mode))
8389 	     ? mode : GET_MODE (x));
8390 
8391   /* It is not valid to do a right-shift in a narrower mode
8392      than the one it came in with.  */
8393   if ((code == LSHIFTRT || code == ASHIFTRT)
8394       && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
8395     op_mode = GET_MODE (x);
8396 
8397   /* Truncate MASK to fit OP_MODE.  */
8398   if (op_mode)
8399     mask &= GET_MODE_MASK (op_mode);
8400 
8401   /* When we have an arithmetic operation, or a shift whose count we
8402      do not know, we need to assume that all bits up to the highest-order
8403      bit in MASK will be needed.  This is how we form such a mask.  */
8404   if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
8405     fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
8406   else
8407     fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
8408 		   - 1);
8409 
8410   /* Determine what bits of X are guaranteed to be (non)zero.  */
8411   nonzero = nonzero_bits (x, mode);
8412 
8413   /* If none of the bits in X are needed, return a zero.  */
8414   if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8415     x = const0_rtx;
8416 
8417   /* If X is a CONST_INT, return a new one.  Do this here since the
8418      test below will fail.  */
8419   if (CONST_INT_P (x))
8420     {
8421       if (SCALAR_INT_MODE_P (mode))
8422 	return gen_int_mode (INTVAL (x) & mask, mode);
8423       else
8424 	{
8425 	  x = GEN_INT (INTVAL (x) & mask);
8426 	  return gen_lowpart_common (mode, x);
8427 	}
8428     }
8429 
8430   /* If X is narrower than MODE and we want all the bits in X's mode, just
8431      get X in the proper mode.  */
8432   if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8433       && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8434     return gen_lowpart (mode, x);
8435 
8436   /* We can ignore the effect of a SUBREG if it narrows the mode or
8437      if the constant masks to zero all the bits the mode doesn't have.  */
8438   if (GET_CODE (x) == SUBREG
8439       && subreg_lowpart_p (x)
8440       && ((GET_MODE_SIZE (GET_MODE (x))
8441 	   < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8442 	  || (0 == (mask
8443 		    & GET_MODE_MASK (GET_MODE (x))
8444 		    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8445     return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8446 
8447   /* The arithmetic simplifications here only work for scalar integer modes.  */
8448   if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8449     return gen_lowpart_or_truncate (mode, x);
8450 
8451   switch (code)
8452     {
8453     case CLOBBER:
8454       /* If X is a (clobber (const_int)), return it since we know we are
8455 	 generating something that won't match.  */
8456       return x;
8457 
8458     case SIGN_EXTEND:
8459     case ZERO_EXTEND:
8460     case ZERO_EXTRACT:
8461     case SIGN_EXTRACT:
8462       x = expand_compound_operation (x);
8463       if (GET_CODE (x) != code)
8464 	return force_to_mode (x, mode, mask, next_select);
8465       break;
8466 
8467     case TRUNCATE:
8468       /* Similarly for a truncate.  */
8469       return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8470 
8471     case AND:
8472       /* If this is an AND with a constant, convert it into an AND
8473 	 whose constant is the AND of that constant with MASK.  If it
8474 	 remains an AND of MASK, delete it since it is redundant.  */
8475 
8476       if (CONST_INT_P (XEXP (x, 1)))
8477 	{
8478 	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8479 				      mask & INTVAL (XEXP (x, 1)));
8480 
8481 	  /* If X is still an AND, see if it is an AND with a mask that
8482 	     is just some low-order bits.  If so, and it is MASK, we don't
8483 	     need it.  */
8484 
8485 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8486 	      && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8487 		  == mask))
8488 	    x = XEXP (x, 0);
8489 
8490 	  /* If it remains an AND, try making another AND with the bits
8491 	     in the mode mask that aren't in MASK turned on.  If the
8492 	     constant in the AND is wide enough, this might make a
8493 	     cheaper constant.  */
8494 
8495 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8496 	      && GET_MODE_MASK (GET_MODE (x)) != mask
8497 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8498 	    {
8499 	      unsigned HOST_WIDE_INT cval
8500 		= UINTVAL (XEXP (x, 1))
8501 		  | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8502 	      rtx y;
8503 
8504 	      y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
8505 				       gen_int_mode (cval, GET_MODE (x)));
8506 	      if (set_src_cost (y, GET_MODE (x), optimize_this_for_speed_p)
8507 	          < set_src_cost (x, GET_MODE (x), optimize_this_for_speed_p))
8508 		x = y;
8509 	    }
8510 
8511 	  break;
8512 	}
8513 
8514       goto binop;
8515 
8516     case PLUS:
8517       /* In (and (plus FOO C1) M), if M is a mask that just turns off
8518 	 low-order bits (as in an alignment operation) and FOO is already
8519 	 aligned to that boundary, mask C1 to that boundary as well.
8520 	 This may eliminate that PLUS and, later, the AND.  */
8521 
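      /* For example, if M is -8 and FOO is known to be a multiple of 8,
	 (plus FOO (const_int 13)) can be replaced by (plus FOO (const_int 8)),
	 since the dropped low bits cannot carry into the bits that M keeps.  */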
8522       {
8523 	unsigned int width = GET_MODE_PRECISION (mode);
8524 	unsigned HOST_WIDE_INT smask = mask;
8525 
8526 	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8527 	   number, sign extend it.  */
8528 
8529 	if (width < HOST_BITS_PER_WIDE_INT
8530 	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8531 	  smask |= HOST_WIDE_INT_M1U << width;
8532 
8533 	if (CONST_INT_P (XEXP (x, 1))
8534 	    && exact_log2 (- smask) >= 0
8535 	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8536 	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8537 	  return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8538 					       (INTVAL (XEXP (x, 1)) & smask)),
8539 				mode, smask, next_select);
8540       }
8541 
8542       /* ... fall through ...  */
8543 
8544     case MULT:
8545       /* Substituting into the operands of a widening MULT is not likely to
8546 	 create RTL matching a machine insn.  */
8547       if (code == MULT
8548 	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8549 	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8550 	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8551 	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8552 	  && REG_P (XEXP (XEXP (x, 0), 0))
8553 	  && REG_P (XEXP (XEXP (x, 1), 0)))
8554 	return gen_lowpart_or_truncate (mode, x);
8555 
8556       /* For PLUS, MINUS and MULT, we need any bits less significant than the
8557 	 most significant bit in MASK since carries from those bits will
8558 	 affect the bits we are interested in.  */
8559       mask = fuller_mask;
8560       goto binop;
8561 
8562     case MINUS:
8563       /* If X is (minus C Y) where C's least set bit is larger than any bit
8564 	 in the mask, then we may replace with (neg Y).  */
8565       if (CONST_INT_P (XEXP (x, 0))
8566 	  && ((UINTVAL (XEXP (x, 0)) & -UINTVAL (XEXP (x, 0))) > mask))
8567 	{
8568 	  x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8569 				  GET_MODE (x));
8570 	  return force_to_mode (x, mode, mask, next_select);
8571 	}
8572 
8573       /* Similarly, if C contains every bit in the fuller_mask, then we may
8574 	 replace with (not Y).  */
8575       if (CONST_INT_P (XEXP (x, 0))
8576 	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8577 	{
8578 	  x = simplify_gen_unary (NOT, GET_MODE (x),
8579 				  XEXP (x, 1), GET_MODE (x));
8580 	  return force_to_mode (x, mode, mask, next_select);
8581 	}
8582 
8583       mask = fuller_mask;
8584       goto binop;
8585 
8586     case IOR:
8587     case XOR:
8588       /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8589 	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8590 	 operation which may be a bitfield extraction.  Ensure that the
8591 	 constant we form is not wider than the mode of X.  */
8592 
8593       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8594 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8595 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8596 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8597 	  && CONST_INT_P (XEXP (x, 1))
8598 	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
8599 	       + floor_log2 (INTVAL (XEXP (x, 1))))
8600 	      < GET_MODE_PRECISION (GET_MODE (x)))
8601 	  && (UINTVAL (XEXP (x, 1))
8602 	      & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8603 	{
8604 	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8605 			       << INTVAL (XEXP (XEXP (x, 0), 1)),
8606 			       GET_MODE (x));
8607 	  temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8608 				      XEXP (XEXP (x, 0), 0), temp);
8609 	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8610 				   XEXP (XEXP (x, 0), 1));
8611 	  return force_to_mode (x, mode, mask, next_select);
8612 	}
8613 
8614     binop:
8615       /* For most binary operations, just propagate into the operation and
8616 	 change the mode if we have an operation of that mode.  */
8617 
8618       op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8619       op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8620 
8621       /* If we ended up truncating both operands, truncate the result of the
8622 	 operation instead.  */
8623       if (GET_CODE (op0) == TRUNCATE
8624 	  && GET_CODE (op1) == TRUNCATE)
8625 	{
8626 	  op0 = XEXP (op0, 0);
8627 	  op1 = XEXP (op1, 0);
8628 	}
8629 
8630       op0 = gen_lowpart_or_truncate (op_mode, op0);
8631       op1 = gen_lowpart_or_truncate (op_mode, op1);
8632 
8633       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8634 	x = simplify_gen_binary (code, op_mode, op0, op1);
8635       break;
8636 
8637     case ASHIFT:
8638       /* For left shifts, do the same, but just for the first operand.
8639 	 However, we cannot do anything with shifts where we cannot
8640 	 guarantee that the counts are smaller than the size of the mode
8641 	 because such a count will have a different meaning in a
8642 	 wider mode.  */
8643 
8644       if (! (CONST_INT_P (XEXP (x, 1))
8645 	     && INTVAL (XEXP (x, 1)) >= 0
8646 	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8647 	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8648 		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8649 		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8650 	break;
8651 
8652       /* If the shift count is a constant and we can do arithmetic in
8653 	 the mode of the shift, refine which bits we need.  Otherwise, use the
8654 	 conservative form of the mask.  */
8655       if (CONST_INT_P (XEXP (x, 1))
8656 	  && INTVAL (XEXP (x, 1)) >= 0
8657 	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8658 	  && HWI_COMPUTABLE_MODE_P (op_mode))
8659 	mask >>= INTVAL (XEXP (x, 1));
8660       else
8661 	mask = fuller_mask;
8662 
8663       op0 = gen_lowpart_or_truncate (op_mode,
8664 				     force_to_mode (XEXP (x, 0), op_mode,
8665 						    mask, next_select));
8666 
8667       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8668 	x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8669       break;
8670 
8671     case LSHIFTRT:
8672       /* Here we can only do something if the shift count is a constant,
8673 	 this shift constant is valid for the host, and we can do arithmetic
8674 	 in OP_MODE.  */
8675 
8676       if (CONST_INT_P (XEXP (x, 1))
8677 	  && INTVAL (XEXP (x, 1)) >= 0
8678 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8679 	  && HWI_COMPUTABLE_MODE_P (op_mode))
8680 	{
8681 	  rtx inner = XEXP (x, 0);
8682 	  unsigned HOST_WIDE_INT inner_mask;
8683 
8684 	  /* Select the mask of the bits we need for the shift operand.  */
8685 	  inner_mask = mask << INTVAL (XEXP (x, 1));
8686 
8687 	  /* We can only change the mode of the shift if we can do arithmetic
8688 	     in the mode of the shift and INNER_MASK is no wider than the
8689 	     width of X's mode.  */
8690 	  if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8691 	    op_mode = GET_MODE (x);
8692 
8693 	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8694 
8695 	  if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8696 	    x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8697 	}
8698 
8699       /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8700 	 shift and AND produces only copies of the sign bit (C2 is one less
8701 	 than a power of two), we can do this with just a shift.  */
8702 
8703       if (GET_CODE (x) == LSHIFTRT
8704 	  && CONST_INT_P (XEXP (x, 1))
8705 	  /* The shift puts one of the sign bit copies in the least significant
8706 	     bit.  */
8707 	  && ((INTVAL (XEXP (x, 1))
8708 	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8709 	      >= GET_MODE_PRECISION (GET_MODE (x)))
8710 	  && exact_log2 (mask + 1) >= 0
8711 	  /* Number of bits left after the shift must be more than the mask
8712 	     needs.  */
8713 	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8714 	      <= GET_MODE_PRECISION (GET_MODE (x)))
8715 	  /* Must be more sign bit copies than the mask needs.  */
8716 	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8717 	      >= exact_log2 (mask + 1)))
8718 	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8719 				 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8720 					  - exact_log2 (mask + 1)));
8721 
8722       goto shiftrt;
8723 
8724     case ASHIFTRT:
8725       /* If we are just looking for the sign bit, we don't need this shift at
8726 	 all, even if it has a variable count.  */
8727       if (val_signbit_p (GET_MODE (x), mask))
8728 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8729 
8730       /* If this is a shift by a constant, get a mask that contains those bits
8731 	 that are not copies of the sign bit.  We then have two cases:  If
8732 	 MASK only includes those bits, this can be a logical shift, which may
8733 	 allow simplifications.  If MASK is a single-bit field not within
8734 	 those bits, we are requesting a copy of the sign bit and hence can
8735 	 shift the sign bit to the appropriate location.  */
8736 
8737       if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8738 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8739 	{
8740 	  int i;
8741 
8742 	  /* If the considered data is wider than HOST_WIDE_INT, we can't
8743 	     represent a mask for all its bits in a single scalar.
8744 	     But we only care about the lower bits, so calculate these.  */
8745 
8746 	  if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8747 	    {
8748 	      nonzero = ~(unsigned HOST_WIDE_INT) 0;
8749 
8750 	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8751 		 is the number of bits a full-width mask would have set.
8752 		 We need only shift if these are fewer than nonzero can
8753 		 hold.  If not, we must keep all bits set in nonzero.  */
8754 
8755 	      if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8756 		  < HOST_BITS_PER_WIDE_INT)
8757 		nonzero >>= INTVAL (XEXP (x, 1))
8758 			    + HOST_BITS_PER_WIDE_INT
8759 			    - GET_MODE_PRECISION (GET_MODE (x));
8760 	    }
8761 	  else
8762 	    {
8763 	      nonzero = GET_MODE_MASK (GET_MODE (x));
8764 	      nonzero >>= INTVAL (XEXP (x, 1));
8765 	    }
8766 
8767 	  if ((mask & ~nonzero) == 0)
8768 	    {
8769 	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8770 					XEXP (x, 0), INTVAL (XEXP (x, 1)));
8771 	      if (GET_CODE (x) != ASHIFTRT)
8772 		return force_to_mode (x, mode, mask, next_select);
8773 	    }
8774 
8775 	  else if ((i = exact_log2 (mask)) >= 0)
8776 	    {
8777 	      x = simplify_shift_const
8778 		  (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8779 		   GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8780 
8781 	      if (GET_CODE (x) != ASHIFTRT)
8782 		return force_to_mode (x, mode, mask, next_select);
8783 	    }
8784 	}
8785 
8786       /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
8787 	 even if the shift count isn't a constant.  */
8788       if (mask == 1)
8789 	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8790 				 XEXP (x, 0), XEXP (x, 1));
8791 
8792     shiftrt:
8793 
8794       /* If this is a zero- or sign-extension operation that just affects bits
8795 	 we don't care about, remove it.  Be sure the call above returned
8796 	 something that is still a shift.  */
8797 
8798       if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8799 	  && CONST_INT_P (XEXP (x, 1))
8800 	  && INTVAL (XEXP (x, 1)) >= 0
8801 	  && (INTVAL (XEXP (x, 1))
8802 	      <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8803 	  && GET_CODE (XEXP (x, 0)) == ASHIFT
8804 	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8805 	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8806 			      next_select);
8807 
8808       break;
8809 
8810     case ROTATE:
8811     case ROTATERT:
8812       /* If the shift count is constant and we can do computations
8813 	 in the mode of X, compute where the bits we care about are.
8814 	 Otherwise, we can't do anything.  Don't change the mode of
8815 	 the shift or propagate MODE into the shift, though.  */
8816       if (CONST_INT_P (XEXP (x, 1))
8817 	  && INTVAL (XEXP (x, 1)) >= 0)
8818 	{
8819 	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8820 					    GET_MODE (x),
8821 					    gen_int_mode (mask, GET_MODE (x)),
8822 					    XEXP (x, 1));
8823 	  if (temp && CONST_INT_P (temp))
8824 	    x = simplify_gen_binary (code, GET_MODE (x),
8825 				     force_to_mode (XEXP (x, 0), GET_MODE (x),
8826 						    INTVAL (temp), next_select),
8827 				     XEXP (x, 1));
8828 	}
8829       break;
8830 
8831     case NEG:
8832       /* If we just want the low-order bit, the NEG isn't needed since it
8833 	 won't change the low-order bit.  */
8834       if (mask == 1)
8835 	return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8836 
8837       /* We need any bits less significant than the most significant bit in
8838 	 MASK since carries from those bits will affect the bits we are
8839 	 interested in.  */
8840       mask = fuller_mask;
8841       goto unop;
8842 
8843     case NOT:
8844       /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8845 	 same as the XOR case above.  Ensure that the constant we form is not
8846 	 wider than the mode of X.  */
8847 
8848       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8849 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8850 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8851 	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8852 	      < GET_MODE_PRECISION (GET_MODE (x)))
8853 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8854 	{
8855 	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8856 			       GET_MODE (x));
8857 	  temp = simplify_gen_binary (XOR, GET_MODE (x),
8858 				      XEXP (XEXP (x, 0), 0), temp);
8859 	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8860 				   temp, XEXP (XEXP (x, 0), 1));
8861 
8862 	  return force_to_mode (x, mode, mask, next_select);
8863 	}
8864 
8865       /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8866 	 use the full mask inside the NOT.  */
8867       mask = fuller_mask;
8868 
8869     unop:
8870       op0 = gen_lowpart_or_truncate (op_mode,
8871 				     force_to_mode (XEXP (x, 0), mode, mask,
8872 						    next_select));
8873       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8874 	x = simplify_gen_unary (code, op_mode, op0, op_mode);
8875       break;
8876 
8877     case NE:
8878       /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8879 	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8880 	 which is equal to STORE_FLAG_VALUE.  */
8881       if ((mask & ~STORE_FLAG_VALUE) == 0
8882 	  && XEXP (x, 1) == const0_rtx
8883 	  && GET_MODE (XEXP (x, 0)) == mode
8884 	  && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8885 	  && (nonzero_bits (XEXP (x, 0), mode)
8886 	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8887 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8888 
8889       break;
8890 
8891     case IF_THEN_ELSE:
8892       /* We have no way of knowing if the IF_THEN_ELSE can itself be
8893 	 written in a narrower mode.  We play it safe and do not do so.  */
8894 
8895       op0 = gen_lowpart_or_truncate (GET_MODE (x),
8896 				     force_to_mode (XEXP (x, 1), mode,
8897 						    mask, next_select));
8898       op1 = gen_lowpart_or_truncate (GET_MODE (x),
8899 				     force_to_mode (XEXP (x, 2), mode,
8900 						    mask, next_select));
8901       if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
8902 	x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
8903 				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
8904 				  op0, op1);
8905       break;
8906 
8907     default:
8908       break;
8909     }
8910 
8911   /* Ensure we return a value of the proper mode.  */
8912   return gen_lowpart_or_truncate (mode, x);
8913 }
8914 
8915 /* Return nonzero if X is an expression that has one of two values depending on
8916    whether some other value is zero or nonzero.  In that case, we return the
8917    value that is being tested, *PTRUE is set to the value of X when the rtx
8918    being returned is nonzero, and *PFALSE is set to the other alternative.
8919 
8920    If we return zero, we set *PTRUE and *PFALSE to X.  */
8921 
8922 static rtx
8923 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8924 {
8925   machine_mode mode = GET_MODE (x);
8926   enum rtx_code code = GET_CODE (x);
8927   rtx cond0, cond1, true0, true1, false0, false1;
8928   unsigned HOST_WIDE_INT nz;
8929 
8930   /* If we are comparing a value against zero, we are done.  */
8931   if ((code == NE || code == EQ)
8932       && XEXP (x, 1) == const0_rtx)
8933     {
8934       *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8935       *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8936       return XEXP (x, 0);
8937     }
8938 
8939   /* If this is a unary operation whose operand has one of two values, apply
8940      our opcode to compute those values.  */
8941   else if (UNARY_P (x)
8942 	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8943     {
8944       *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8945       *pfalse = simplify_gen_unary (code, mode, false0,
8946 				    GET_MODE (XEXP (x, 0)));
8947       return cond0;
8948     }
8949 
8950   /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8951      make can't possibly match and would suppress other optimizations.  */
8952   else if (code == COMPARE)
8953     ;
8954 
8955   /* If this is a binary operation, see if either side has only one of two
8956      values.  If either one does or if both do and they are conditional on
8957      the same value, compute the new true and false values.  */
8958   else if (BINARY_P (x))
8959     {
8960       cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8961       cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8962 
8963       if ((cond0 != 0 || cond1 != 0)
8964 	  && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8965 	{
8966 	  /* If if_then_else_cond returned zero, then true/false are the
8967 	     same rtl.  We must copy one of them to prevent invalid rtl
8968 	     sharing.  */
8969 	  if (cond0 == 0)
8970 	    true0 = copy_rtx (true0);
8971 	  else if (cond1 == 0)
8972 	    true1 = copy_rtx (true1);
8973 
8974 	  if (COMPARISON_P (x))
8975 	    {
8976 	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8977 						true0, true1);
8978 	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8979 						 false0, false1);
8980 	     }
8981 	  else
8982 	    {
8983 	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
8984 	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
8985 	    }
8986 
8987 	  return cond0 ? cond0 : cond1;
8988 	}
8989 
8990       /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8991 	 operands is zero when the other is nonzero, and vice-versa,
8992 	 and STORE_FLAG_VALUE is 1 or -1.  */
8993 
8994       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8995 	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
8996 	      || code == UMAX)
8997 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8998 	{
8999 	  rtx op0 = XEXP (XEXP (x, 0), 1);
9000 	  rtx op1 = XEXP (XEXP (x, 1), 1);
9001 
9002 	  cond0 = XEXP (XEXP (x, 0), 0);
9003 	  cond1 = XEXP (XEXP (x, 1), 0);
9004 
9005 	  if (COMPARISON_P (cond0)
9006 	      && COMPARISON_P (cond1)
9007 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9008 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9009 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9010 		  || ((swap_condition (GET_CODE (cond0))
9011 		       == reversed_comparison_code (cond1, NULL))
9012 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9013 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9014 	      && ! side_effects_p (x))
9015 	    {
9016 	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9017 	      *pfalse = simplify_gen_binary (MULT, mode,
9018 					     (code == MINUS
9019 					      ? simplify_gen_unary (NEG, mode,
9020 								    op1, mode)
9021 					      : op1),
9022 					      const_true_rtx);
9023 	      return cond0;
9024 	    }
9025 	}
9026 
9027       /* Similarly for MULT, AND and UMIN, except that for these the result
9028 	 is always zero.  */
9029       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9030 	  && (code == MULT || code == AND || code == UMIN)
9031 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9032 	{
9033 	  cond0 = XEXP (XEXP (x, 0), 0);
9034 	  cond1 = XEXP (XEXP (x, 1), 0);
9035 
9036 	  if (COMPARISON_P (cond0)
9037 	      && COMPARISON_P (cond1)
9038 	      && SCALAR_INT_MODE_P (mode)
9039 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9040 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9041 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9042 		  || ((swap_condition (GET_CODE (cond0))
9043 		       == reversed_comparison_code (cond1, NULL))
9044 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9045 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9046 	      && ! side_effects_p (x))
9047 	    {
9048 	      *ptrue = *pfalse = const0_rtx;
9049 	      return cond0;
9050 	    }
9051 	}
9052     }
9053 
9054   else if (code == IF_THEN_ELSE)
9055     {
9056       /* If we have IF_THEN_ELSE already, extract the condition and
9057 	 canonicalize it if it is NE or EQ.  */
9058       cond0 = XEXP (x, 0);
9059       *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9060       if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9061 	return XEXP (cond0, 0);
9062       else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9063 	{
9064 	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9065 	  return XEXP (cond0, 0);
9066 	}
9067       else
9068 	return cond0;
9069     }
9070 
9071   /* If X is a SUBREG, we can narrow both the true and false values
9072      of the inner expression, if there is a condition.  */
9073   else if (code == SUBREG
9074 	   && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9075 					       &true0, &false0)))
9076     {
9077       true0 = simplify_gen_subreg (mode, true0,
9078 				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9079       false0 = simplify_gen_subreg (mode, false0,
9080 				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9081       if (true0 && false0)
9082 	{
9083 	  *ptrue = true0;
9084 	  *pfalse = false0;
9085 	  return cond0;
9086 	}
9087     }
9088 
9089   /* If X is a constant, this isn't special and will cause confusion
9090      if we treat it as such.  Likewise if it is equivalent to a constant.  */
9091   else if (CONSTANT_P (x)
9092 	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9093     ;
9094 
9095   /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9096      will be least confusing to the rest of the compiler.  */
9097   else if (mode == BImode)
9098     {
9099       *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9100       return x;
9101     }
9102 
9103   /* If X is known to be either 0 or -1, those are the true and
9104      false values when testing X.  */
9105   else if (x == constm1_rtx || x == const0_rtx
9106 	   || (mode != VOIDmode
9107 	       && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
9108     {
9109       *ptrue = constm1_rtx, *pfalse = const0_rtx;
9110       return x;
9111     }
9112 
9113   /* Likewise for 0 or a single bit.  */
9114   else if (HWI_COMPUTABLE_MODE_P (mode)
9115 	   && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
9116     {
9117       *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9118       return x;
9119     }
9120 
9121   /* Otherwise fail; show no condition with true and false values the same.  */
9122   *ptrue = *pfalse = x;
9123   return 0;
9124 }
9125 
9126 /* Return the value of expression X given the fact that condition COND
9127    is known to be true when applied to REG as its first operand and VAL
9128    as its second.  X is known to not be shared and so can be modified in
9129    place.
9130 
9131    We only handle the simplest cases, and specifically those cases that
9132    arise with IF_THEN_ELSE expressions.  */
9133 
9134 static rtx
9135 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9136 {
9137   enum rtx_code code = GET_CODE (x);
9138   const char *fmt;
9139   int i, j;
9140 
9141   if (side_effects_p (x))
9142     return x;
9143 
9144   /* If either operand of the condition is a floating point value,
9145      then we have to avoid collapsing an EQ comparison.  */
9146   if (cond == EQ
9147       && rtx_equal_p (x, reg)
9148       && ! FLOAT_MODE_P (GET_MODE (x))
9149       && ! FLOAT_MODE_P (GET_MODE (val)))
9150     return val;
9151 
9152   if (cond == UNEQ && rtx_equal_p (x, reg))
9153     return val;
9154 
9155   /* If X is (abs REG) and we know something about REG's relationship
9156      with zero, we may be able to simplify this.  */
9157 
9158   if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9159     switch (cond)
9160       {
9161       case GE:  case GT:  case EQ:
9162 	return XEXP (x, 0);
9163       case LT:  case LE:
9164 	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9165 				   XEXP (x, 0),
9166 				   GET_MODE (XEXP (x, 0)));
9167       default:
9168 	break;
9169       }
9170 
9171   /* The only other cases we handle are MIN, MAX, and comparisons if the
9172      operands are the same as REG and VAL.  */
9173 
9174   else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9175     {
9176       if (rtx_equal_p (XEXP (x, 0), val))
9177         {
9178 	  std::swap (val, reg);
9179 	  cond = swap_condition (cond);
9180         }
9181 
9182       if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9183 	{
9184 	  if (COMPARISON_P (x))
9185 	    {
9186 	      if (comparison_dominates_p (cond, code))
9187 		return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9188 
9189 	      code = reversed_comparison_code (x, NULL);
9190 	      if (code != UNKNOWN
9191 		  && comparison_dominates_p (cond, code))
9192 		return CONST0_RTX (GET_MODE (x));
9193 	      else
9194 		return x;
9195 	    }
9196 	  else if (code == SMAX || code == SMIN
9197 		   || code == UMIN || code == UMAX)
9198 	    {
9199 	      int unsignedp = (code == UMIN || code == UMAX);
9200 
9201 	      /* Do not reverse the condition when it is NE or EQ.
9202 		 This is because we cannot conclude anything about
9203 		 the value of 'SMAX (x, y)' when x is not equal to y,
9204 		 but we can when x equals y.  */
9205 	      if ((code == SMAX || code == UMAX)
9206 		  && ! (cond == EQ || cond == NE))
9207 		cond = reverse_condition (cond);
9208 
9209 	      switch (cond)
9210 		{
9211 		case GE:   case GT:
9212 		  return unsignedp ? x : XEXP (x, 1);
9213 		case LE:   case LT:
9214 		  return unsignedp ? x : XEXP (x, 0);
9215 		case GEU:  case GTU:
9216 		  return unsignedp ? XEXP (x, 1) : x;
9217 		case LEU:  case LTU:
9218 		  return unsignedp ? XEXP (x, 0) : x;
9219 		default:
9220 		  break;
9221 		}
9222 	    }
9223 	}
9224     }
9225   else if (code == SUBREG)
9226     {
9227       machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9228       rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9229 
9230       if (SUBREG_REG (x) != r)
9231 	{
9232 	  /* We must simplify subreg here, before we lose track of the
9233 	     original inner_mode.  */
9234 	  new_rtx = simplify_subreg (GET_MODE (x), r,
9235 				     inner_mode, SUBREG_BYTE (x));
9236 	  if (new_rtx)
9237 	    return new_rtx;
9238 	  else
9239 	    SUBST (SUBREG_REG (x), r);
9240 	}
9241 
9242       return x;
9243     }
9244   /* We don't have to handle SIGN_EXTEND here, because even in the
9245      case of replacing something with a modeless CONST_INT, a
9246      CONST_INT is already (supposed to be) a valid sign extension for
9247      its narrower mode, which implies it's already properly
9248      sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
9249      story is different.  */
9250   else if (code == ZERO_EXTEND)
9251     {
9252       machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9253       rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9254 
9255       if (XEXP (x, 0) != r)
9256 	{
9257 	  /* We must simplify the zero_extend here, before we lose
9258 	     track of the original inner_mode.  */
9259 	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9260 					      r, inner_mode);
9261 	  if (new_rtx)
9262 	    return new_rtx;
9263 	  else
9264 	    SUBST (XEXP (x, 0), r);
9265 	}
9266 
9267       return x;
9268     }
9269 
9270   fmt = GET_RTX_FORMAT (code);
9271   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9272     {
9273       if (fmt[i] == 'e')
9274 	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9275       else if (fmt[i] == 'E')
9276 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9277 	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9278 						cond, reg, val));
9279     }
9280 
9281   return x;
9282 }
9283 
9284 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9285    assignment as a field assignment.  */
9286 
9287 static int
9288 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9289 {
9290   if (widen_x && GET_MODE (x) != GET_MODE (y))
9291     {
9292       if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
9293 	return 0;
9294       if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9295 	return 0;
9296       /* For big endian, adjust the memory offset.  */
9297       if (BYTES_BIG_ENDIAN)
9298 	x = adjust_address_nv (x, GET_MODE (y),
9299 			       -subreg_lowpart_offset (GET_MODE (x),
9300 						       GET_MODE (y)));
9301       else
9302 	x = adjust_address_nv (x, GET_MODE (y), 0);
9303     }
9304 
9305   if (x == y || rtx_equal_p (x, y))
9306     return 1;
9307 
9308   if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9309     return 0;
9310 
9311   /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9312      Note that all SUBREGs of MEM are paradoxical; otherwise they
9313      would have been rewritten.  */
9314   if (MEM_P (x) && GET_CODE (y) == SUBREG
9315       && MEM_P (SUBREG_REG (y))
9316       && rtx_equal_p (SUBREG_REG (y),
9317 		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9318     return 1;
9319 
9320   if (MEM_P (y) && GET_CODE (x) == SUBREG
9321       && MEM_P (SUBREG_REG (x))
9322       && rtx_equal_p (SUBREG_REG (x),
9323 		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9324     return 1;
9325 
9326   /* We used to see if get_last_value of X and Y were the same but that's
9327      not correct.  In one direction, we'll cause the assignment to have
9328      the wrong destination and in the other case, we'll import a register into
9329      this insn that might already have been dead.  So fail if none of the
9330      above cases are true.  */
9331   return 0;
9332 }
9333 
9334 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9335    Return that assignment if so.
9336 
9337    We only handle the most common cases.  */
9338 
9339 static rtx
9340 make_field_assignment (rtx x)
9341 {
9342   rtx dest = SET_DEST (x);
9343   rtx src = SET_SRC (x);
9344   rtx assign;
9345   rtx rhs, lhs;
9346   HOST_WIDE_INT c1;
9347   HOST_WIDE_INT pos;
9348   unsigned HOST_WIDE_INT len;
9349   rtx other;
9350   machine_mode mode;
9351 
9352   /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9353      a clear of a one-bit field.  We will have changed it to
9354      (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
9355      for a SUBREG.  */
9356 
9357   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9358       && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9359       && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9360       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9361     {
9362       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9363 				1, 1, 1, 0);
9364       if (assign != 0)
9365 	return gen_rtx_SET (assign, const0_rtx);
9366       return x;
9367     }
9368 
9369   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9370       && subreg_lowpart_p (XEXP (src, 0))
9371       && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
9372 	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
9373       && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9374       && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9375       && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9376       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9377     {
9378       assign = make_extraction (VOIDmode, dest, 0,
9379 				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9380 				1, 1, 1, 0);
9381       if (assign != 0)
9382 	return gen_rtx_SET (assign, const0_rtx);
9383       return x;
9384     }
9385 
9386   /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9387      one-bit field.  */
9388   if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9389       && XEXP (XEXP (src, 0), 0) == const1_rtx
9390       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9391     {
9392       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9393 				1, 1, 1, 0);
9394       if (assign != 0)
9395 	return gen_rtx_SET (assign, const1_rtx);
9396       return x;
9397     }
9398 
9399   /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9400      SRC is an AND with all bits of that field set, then we can discard
9401      the AND.  */
9402   if (GET_CODE (dest) == ZERO_EXTRACT
9403       && CONST_INT_P (XEXP (dest, 1))
9404       && GET_CODE (src) == AND
9405       && CONST_INT_P (XEXP (src, 1)))
9406     {
9407       HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9408       unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9409       unsigned HOST_WIDE_INT ze_mask;
9410 
9411       if (width >= HOST_BITS_PER_WIDE_INT)
9412 	ze_mask = -1;
9413       else
9414 	ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9415 
9416       /* Complete overlap.  We can remove the source AND.  */
9417       if ((and_mask & ze_mask) == ze_mask)
9418 	return gen_rtx_SET (dest, XEXP (src, 0));
9419 
9420       /* Partial overlap.  We can reduce the source AND.  */
9421       if ((and_mask & ze_mask) != and_mask)
9422 	{
9423 	  mode = GET_MODE (src);
9424 	  src = gen_rtx_AND (mode, XEXP (src, 0),
9425 			     gen_int_mode (and_mask & ze_mask, mode));
9426 	  return gen_rtx_SET (dest, src);
9427 	}
9428     }
9429 
9430   /* The other case we handle is assignments into a constant-position
9431      field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
9432      a mask that has all one bits except for a group of zero bits and
9433      OTHER is known to have zeros where C1 has ones, this is such an
9434      assignment.  Compute the position and length from C1.  Shift OTHER
9435      to the appropriate position, force it to the required mode, and
9436      make the extraction.  Check for the AND in both operands.  */
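
  /* Illustrative sketch, not part of the original source: one way to
     recover the position and length of the zero group in C1, in the
     spirit of the get_pos_from_mask call below.  The helper name and the
     fixed 32-bit mode are assumptions made for the example only.  */
#if 0
#include <assert.h>
#include <stdint.h>

/* Return the bit position of the zero group in C1 and store its length
   in *LEN; return -1 if the zero bits are not contiguous.  */
static int
pos_and_len_from_c1 (uint32_t c1, unsigned *len)
{
  uint32_t zeros = ~c1;			/* the field being assigned */
  if (zeros == 0 || zeros == UINT32_MAX)
    return -1;
  int pos = __builtin_ctz (zeros);
  uint32_t run = zeros >> pos;
  *len = __builtin_ctz (~run);		/* length of the run of ones */
  /* Fail if any zero bit of C1 remains above the group.  */
  return (run >> *len) == 0 ? pos : -1;
}

int
main (void)
{
  unsigned len;
  assert (pos_and_len_from_c1 (0xffff00ffu, &len) == 8 && len == 8);
  assert (pos_and_len_from_c1 (0xff00f0ffu, &len) == -1);
  return 0;
}
#endif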
9437 
9438   /* One or more SUBREGs might obscure the constant-position field
9439      assignment.  The first one we are likely to encounter is an outer
9440      narrowing SUBREG, which we can just strip for the purposes of
9441      identifying the constant-field assignment.  */
9442   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
9443     src = SUBREG_REG (src);
9444 
9445   if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9446     return x;
9447 
9448   rhs = expand_compound_operation (XEXP (src, 0));
9449   lhs = expand_compound_operation (XEXP (src, 1));
9450 
9451   if (GET_CODE (rhs) == AND
9452       && CONST_INT_P (XEXP (rhs, 1))
9453       && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9454     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9455   /* The second SUBREG that might get in the way is a paradoxical
9456      SUBREG around the first operand of the AND.  We want to
9457      pretend the operand is as wide as the destination here.   We
9458      do this by adjusting the MEM to wider mode for the sole
9459      purpose of the call to rtx_equal_for_field_assignment_p.   Also
9460      note this trick only works for MEMs.  */
9461   else if (GET_CODE (rhs) == AND
9462 	   && paradoxical_subreg_p (XEXP (rhs, 0))
9463 	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9464 	   && CONST_INT_P (XEXP (rhs, 1))
9465 	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9466 						dest, true))
9467     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9468   else if (GET_CODE (lhs) == AND
9469 	   && CONST_INT_P (XEXP (lhs, 1))
9470 	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9471     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9472   /* The second SUBREG that might get in the way is a paradoxical
9473      SUBREG around the first operand of the AND.  We want to
9474      pretend the operand is as wide as the destination here.   We
9475      do this by adjusting the MEM to wider mode for the sole
9476      purpose of the call to rtx_equal_for_field_assignment_p.   Also
9477      note this trick only works for MEMs.  */
9478   else if (GET_CODE (lhs) == AND
9479 	   && paradoxical_subreg_p (XEXP (lhs, 0))
9480 	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9481 	   && CONST_INT_P (XEXP (lhs, 1))
9482 	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9483 						dest, true))
9484     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9485   else
9486     return x;
9487 
9488   pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9489   if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9490       || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9491       || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9492     return x;
9493 
9494   assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9495   if (assign == 0)
9496     return x;
9497 
9498   /* The mode to use for the source is the mode of the assignment, or of
9499      what is inside a possible STRICT_LOW_PART.  */
9500   mode = (GET_CODE (assign) == STRICT_LOW_PART
9501 	  ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9502 
9503   /* Shift OTHER right POS places and make it the source, restricting it
9504      to the proper length and mode.  */
9505 
9506   src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9507 						     GET_MODE (src),
9508 						     other, pos),
9509 			       dest);
9510   src = force_to_mode (src, mode,
9511 		       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9512 		       ? ~(unsigned HOST_WIDE_INT) 0
9513 		       : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9514 		       0);
9515 
9516   /* If SRC is masked by an AND that does not make a difference in
9517      the value being stored, strip it.  */
9518   if (GET_CODE (assign) == ZERO_EXTRACT
9519       && CONST_INT_P (XEXP (assign, 1))
9520       && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9521       && GET_CODE (src) == AND
9522       && CONST_INT_P (XEXP (src, 1))
9523       && UINTVAL (XEXP (src, 1))
9524 	 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9525     src = XEXP (src, 0);
9526 
9527   return gen_rtx_SET (assign, src);
9528 }
9529 
9530 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9531    if so.  */
9532 
9533 static rtx
9534 apply_distributive_law (rtx x)
9535 {
9536   enum rtx_code code = GET_CODE (x);
9537   enum rtx_code inner_code;
9538   rtx lhs, rhs, other;
9539   rtx tem;
9540 
9541   /* Distributivity is not true for floating point as it can change the
9542      value.  So we don't do it unless -funsafe-math-optimizations.  */
9543   if (FLOAT_MODE_P (GET_MODE (x))
9544       && ! flag_unsafe_math_optimizations)
9545     return x;
9546 
9547   /* The outer operation can only be one of the following:  */
9548   if (code != IOR && code != AND && code != XOR
9549       && code != PLUS && code != MINUS)
9550     return x;
9551 
9552   lhs = XEXP (x, 0);
9553   rhs = XEXP (x, 1);
9554 
9555   /* If either operand is a primitive we can't do anything, so get out
9556      fast.  */
9557   if (OBJECT_P (lhs) || OBJECT_P (rhs))
9558     return x;
9559 
9560   lhs = expand_compound_operation (lhs);
9561   rhs = expand_compound_operation (rhs);
9562   inner_code = GET_CODE (lhs);
9563   if (inner_code != GET_CODE (rhs))
9564     return x;
9565 
9566   /* See if the inner and outer operations distribute.  */
9567   switch (inner_code)
9568     {
9569     case LSHIFTRT:
9570     case ASHIFTRT:
9571     case AND:
9572     case IOR:
9573       /* These all distribute except over PLUS.  */
9574       if (code == PLUS || code == MINUS)
9575 	return x;
9576       break;
9577 
9578     case MULT:
9579       if (code != PLUS && code != MINUS)
9580 	return x;
9581       break;
9582 
9583     case ASHIFT:
9584       /* This is also a multiply, so it distributes over everything.  */
9585       break;
9586 
9587     /* This used to handle SUBREG, but this turned out to be counter-
9588        productive, since (subreg (op ...)) usually is not handled by
9589        insn patterns, and this "optimization" therefore transformed
9590        recognizable patterns into unrecognizable ones.  Therefore the
9591        SUBREG case was removed from here.
9592 
9593        It is possible that distributing SUBREG over arithmetic operations
9594        leads to an intermediate result that can then be optimized further,
9595        e.g. by moving the outer SUBREG to the other side of a SET as done
9596        in simplify_set.  This seems to have been the original intent of
9597        handling SUBREGs here.
9598 
9599        However, with current GCC this does not appear to actually happen,
9600        at least on major platforms.  If some case is found where removing
9601        the SUBREG case here prevents follow-on optimizations, distributing
9602        SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */
9603 
9604     default:
9605       return x;
9606     }
9607 
9608   /* Set LHS and RHS to the inner operands (A and B in the example
9609      above) and set OTHER to the common operand (C in the example).
9610      There is only one way to do this unless the inner operation is
9611      commutative.  */
9612   if (COMMUTATIVE_ARITH_P (lhs)
9613       && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9614     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9615   else if (COMMUTATIVE_ARITH_P (lhs)
9616 	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9617     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9618   else if (COMMUTATIVE_ARITH_P (lhs)
9619 	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9620     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9621   else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9622     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9623   else
9624     return x;
9625 
9626   /* Form the new inner operation, seeing if it simplifies first.  */
9627   tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9628 
9629   /* There is one exception to the general way of distributing:
9630      (a | c) ^ (b | c) -> (a ^ b) & ~c  */
9631   if (code == XOR && inner_code == IOR)
9632     {
9633       inner_code = AND;
9634       other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9635     }
9636 
9637   /* We may be able to continue distributing the result, so call
9638      ourselves recursively on the inner operation before forming the
9639      outer operation, which we return.  */
9640   return simplify_gen_binary (inner_code, GET_MODE (x),
9641 			      apply_distributive_law (tem), other);
9642 }
9643 
9644 /* See if X is of the form (* (+ A B) C), and if so convert to
9645    (+ (* A C) (* B C)) and try to simplify.
9646 
9647    Most of the time, this results in no change.  However, if some of
9648    the operands are the same or inverses of each other, simplifications
9649    will result.
9650 
9651    For example, (and (ior A B) (not B)) can occur as the result of
9652    expanding a bit field assignment.  When we apply the distributive
9653    law to this, we get (ior (and A (not B)) (and B (not B))),
9654    which then simplifies to (and A (not B)).
9655 
9656    Note that no checks happen on the validity of applying the inverse
9657    distributive law.  Such checks would be pointless, since we can do
9658    them in the few places where this routine is called.
9659 
9660    N is the index of the term that is decomposed (the arithmetic operation,
9661    i.e. (+ A B) in the first example above).  !N is the index of the term that
9662    is distributed, i.e. of C in the first example above.  */
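
/* Illustrative sketch, not part of the original source: the motivating
   example above, (and (ior A B) (not B)) == (and A (not B)), checked on
   plain unsigned values.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t a = 0x13579bdfu, b = 0x0f0f0f0fu;
  /* Distributing gives (a & ~b) | (b & ~b); the second term is zero.  */
  assert (((a | b) & ~b) == (a & ~b));
  return 0;
}
#endif
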
9663 static rtx
9664 distribute_and_simplify_rtx (rtx x, int n)
9665 {
9666   machine_mode mode;
9667   enum rtx_code outer_code, inner_code;
9668   rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9669 
9670   /* Distributivity is not true for floating point as it can change the
9671      value.  So we don't do it unless -funsafe-math-optimizations.  */
9672   if (FLOAT_MODE_P (GET_MODE (x))
9673       && ! flag_unsafe_math_optimizations)
9674     return NULL_RTX;
9675 
9676   decomposed = XEXP (x, n);
9677   if (!ARITHMETIC_P (decomposed))
9678     return NULL_RTX;
9679 
9680   mode = GET_MODE (x);
9681   outer_code = GET_CODE (x);
9682   distributed = XEXP (x, !n);
9683 
9684   inner_code = GET_CODE (decomposed);
9685   inner_op0 = XEXP (decomposed, 0);
9686   inner_op1 = XEXP (decomposed, 1);
9687 
9688   /* Special case (and (xor B C) (not A)), which is equivalent to
9689      (xor (ior A B) (ior A C))  */
9690   if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9691     {
9692       distributed = XEXP (distributed, 0);
9693       outer_code = IOR;
9694     }
9695 
9696   if (n == 0)
9697     {
9698       /* Distribute the second term.  */
9699       new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9700       new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9701     }
9702   else
9703     {
9704       /* Distribute the first term.  */
9705       new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9706       new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9707     }
9708 
9709   tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9710 						     new_op0, new_op1));
9711   if (GET_CODE (tmp) != outer_code
9712       && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9713 	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
9714     return tmp;
9715 
9716   return NULL_RTX;
9717 }
9718 
9719 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9720    in MODE.  Return an equivalent form, if different from (and VAROP
9721    (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
9722 
9723 static rtx
9724 simplify_and_const_int_1 (machine_mode mode, rtx varop,
9725 			  unsigned HOST_WIDE_INT constop)
9726 {
9727   unsigned HOST_WIDE_INT nonzero;
9728   unsigned HOST_WIDE_INT orig_constop;
9729   rtx orig_varop;
9730   int i;
9731 
9732   orig_varop = varop;
9733   orig_constop = constop;
9734   if (GET_CODE (varop) == CLOBBER)
9735     return NULL_RTX;
9736 
9737   /* Simplify VAROP knowing that we will be only looking at some of the
9738      bits in it.
9739 
9740      Note by passing in CONSTOP, we guarantee that the bits not set in
9741      CONSTOP are not significant and will never be examined.  We must
9742      ensure that is the case by explicitly masking out those bits
9743      before returning.  */
9744   varop = force_to_mode (varop, mode, constop, 0);
9745 
9746   /* If VAROP is a CLOBBER, we will fail so return it.  */
9747   if (GET_CODE (varop) == CLOBBER)
9748     return varop;
9749 
9750   /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9751      to VAROP and return the new constant.  */
9752   if (CONST_INT_P (varop))
9753     return gen_int_mode (INTVAL (varop) & constop, mode);
9754 
9755   /* See what bits may be nonzero in VAROP.  Unlike the general case of
9756      a call to nonzero_bits, here we don't care about bits outside
9757      MODE.  */
9758 
9759   nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9760 
9761   /* Turn off all bits in the constant that are known to already be zero.
9762      Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9763      which is tested below.  */
9764 
9765   constop &= nonzero;
9766 
9767   /* If we don't have any bits left, return zero.  */
9768   if (constop == 0)
9769     return const0_rtx;
9770 
9771   /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9772      a power of two, we can replace this with an ASHIFT.  */
9773   if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9774       && (i = exact_log2 (constop)) >= 0)
9775     return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
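
  /* Illustrative sketch, not part of the original source: when V is known
     to be 0 or 1, (neg V) is 0 or all-ones, so ANDing it with the single
     bit 2**I is the same as shifting V left by I.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  for (uint32_t v = 0; v <= 1; v++)
    for (unsigned i = 0; i < 32; i++)
      assert (((0u - v) & (1u << i)) == (v << i));
  return 0;
}
#endif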
9776 
9777   /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9778      or XOR, then try to apply the distributive law.  This may eliminate
9779      operations if either branch can be simplified because of the AND.
9780      It may also make some cases more complex, but those cases probably
9781      won't match a pattern either with or without this.  */
9782 
9783   if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9784     return
9785       gen_lowpart
9786 	(mode,
9787 	 apply_distributive_law
9788 	 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9789 			       simplify_and_const_int (NULL_RTX,
9790 						       GET_MODE (varop),
9791 						       XEXP (varop, 0),
9792 						       constop),
9793 			       simplify_and_const_int (NULL_RTX,
9794 						       GET_MODE (varop),
9795 						       XEXP (varop, 1),
9796 						       constop))));
9797 
9798   /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9799      the AND and see if one of the operands simplifies to zero.  If so, we
9800      may eliminate it.  */
9801 
9802   if (GET_CODE (varop) == PLUS
9803       && exact_log2 (constop + 1) >= 0)
9804     {
9805       rtx o0, o1;
9806 
9807       o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9808       o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9809       if (o0 == const0_rtx)
9810 	return o1;
9811       if (o1 == const0_rtx)
9812 	return o0;
9813     }
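
  /* Illustrative sketch, not part of the original source: with a low-bit
     mask M = 2**K - 1, an addend whose low K bits are zero cannot affect
     the masked sum, which is why a zero result for one distributed
     operand lets us drop it.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t m = 0xffu;			/* 2**8 - 1 */
  uint32_t a = 0x1200u;			/* (a & m) == 0 */
  for (uint32_t b = 0; b < 4096; b++)
    assert (((a + b) & m) == (b & m));
  return 0;
}
#endif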
9814 
9815   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
9816   varop = gen_lowpart (mode, varop);
9817   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9818     return NULL_RTX;
9819 
9820   /* If we are only masking insignificant bits, return VAROP.  */
9821   if (constop == nonzero)
9822     return varop;
9823 
9824   if (varop == orig_varop && constop == orig_constop)
9825     return NULL_RTX;
9826 
9827   /* Otherwise, return an AND.  */
9828   return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9829 }
9830 
9831 
9832 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9833    in MODE.
9834 
9835    Return an equivalent form, if different from X.  Otherwise, return X.  If
9836    X is zero, we are to always construct the equivalent form.  */
9837 
9838 static rtx
9839 simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
9840 			unsigned HOST_WIDE_INT constop)
9841 {
9842   rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9843   if (tem)
9844     return tem;
9845 
9846   if (!x)
9847     x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9848 			     gen_int_mode (constop, mode));
9849   if (GET_MODE (x) != mode)
9850     x = gen_lowpart (mode, x);
9851   return x;
9852 }
9853 
9854 /* Given a REG, X, compute which bits in X can be nonzero.
9855    We don't care about bits outside of those defined in MODE.
9856 
9857    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
9858    a shift, AND, or zero_extract, we can do better.  */
9859 
9860 static rtx
9861 reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
9862 			      const_rtx known_x ATTRIBUTE_UNUSED,
9863 			      machine_mode known_mode ATTRIBUTE_UNUSED,
9864 			      unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9865 			      unsigned HOST_WIDE_INT *nonzero)
9866 {
9867   rtx tem;
9868   reg_stat_type *rsp;
9869 
9870   /* If X is a register whose nonzero bits value is current, use it.
9871      Otherwise, if X is a register whose value we can find, use that
9872      value.  Otherwise, use the previously-computed global nonzero bits
9873      for this register.  */
9874 
9875   rsp = &reg_stat[REGNO (x)];
9876   if (rsp->last_set_value != 0
9877       && (rsp->last_set_mode == mode
9878 	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9879 	      && GET_MODE_CLASS (mode) == MODE_INT))
9880       && ((rsp->last_set_label >= label_tick_ebb_start
9881 	   && rsp->last_set_label < label_tick)
9882 	  || (rsp->last_set_label == label_tick
9883               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9884 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9885 	      && REGNO (x) < reg_n_sets_max
9886 	      && REG_N_SETS (REGNO (x)) == 1
9887 	      && !REGNO_REG_SET_P
9888 		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9889 		   REGNO (x)))))
9890     {
9891       unsigned HOST_WIDE_INT mask = rsp->last_set_nonzero_bits;
9892 
9893       if (GET_MODE_PRECISION (rsp->last_set_mode) < GET_MODE_PRECISION (mode))
9894 	/* We don't know anything about the upper bits.  */
9895 	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (rsp->last_set_mode);
9896 
9897       *nonzero &= mask;
9898       return NULL;
9899     }
9900 
9901   tem = get_last_value (x);
9902 
9903   if (tem)
9904     {
9905       if (SHORT_IMMEDIATES_SIGN_EXTEND)
9906 	tem = sign_extend_short_imm (tem, GET_MODE (x),
9907 				     GET_MODE_PRECISION (mode));
9908 
9909       return tem;
9910     }
9911   else if (nonzero_sign_valid && rsp->nonzero_bits)
9912     {
9913       unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9914 
9915       if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
9916 	/* We don't know anything about the upper bits.  */
9917 	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9918 
9919       *nonzero &= mask;
9920     }
9921 
9922   return NULL;
9923 }
9924 
9925 /* Return the number of bits at the high-order end of X that are known to
9926    be equal to the sign bit.  X will be used in mode MODE; if MODE is
9927    VOIDmode, X will be used in its own mode.  The returned value  will always
9928    be between 1 and the number of bits in MODE.  */
9929 
9930 static rtx
9931 reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
9932 				     const_rtx known_x ATTRIBUTE_UNUSED,
9933 				     machine_mode known_mode
9934 				     ATTRIBUTE_UNUSED,
9935 				     unsigned int known_ret ATTRIBUTE_UNUSED,
9936 				     unsigned int *result)
9937 {
9938   rtx tem;
9939   reg_stat_type *rsp;
9940 
9941   rsp = &reg_stat[REGNO (x)];
9942   if (rsp->last_set_value != 0
9943       && rsp->last_set_mode == mode
9944       && ((rsp->last_set_label >= label_tick_ebb_start
9945 	   && rsp->last_set_label < label_tick)
9946 	  || (rsp->last_set_label == label_tick
9947               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9948 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9949 	      && REGNO (x) < reg_n_sets_max
9950 	      && REG_N_SETS (REGNO (x)) == 1
9951 	      && !REGNO_REG_SET_P
9952 		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9953 		   REGNO (x)))))
9954     {
9955       *result = rsp->last_set_sign_bit_copies;
9956       return NULL;
9957     }
9958 
9959   tem = get_last_value (x);
9960   if (tem != 0)
9961     return tem;
9962 
9963   if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9964       && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
9965     *result = rsp->sign_bit_copies;
9966 
9967   return NULL;
9968 }
9969 
9970 /* Return the number of "extended" bits there are in X, when interpreted
9971    as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
9972    unsigned quantities, this is the number of high-order zero bits.
9973    For signed quantities, this is the number of copies of the sign bit
9974    minus 1.  In both cases, this function returns the number of "spare"
9975    bits.  For example, if two quantities for which this function returns
9976    at least 1 are added, the addition is known not to overflow.
9977 
9978    This function will always return 0 unless called during combine, which
9979    implies that it must be called from a define_split.  */
9980 
9981 unsigned int
9982 extended_count (const_rtx x, machine_mode mode, int unsignedp)
9983 {
9984   if (nonzero_sign_valid == 0)
9985     return 0;
9986 
9987   return (unsignedp
9988 	  ? (HWI_COMPUTABLE_MODE_P (mode)
9989 	     ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
9990 			       - floor_log2 (nonzero_bits (x, mode)))
9991 	     : 0)
9992 	  : num_sign_bit_copies (x, mode) - 1);
9993 }
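
/* Illustrative sketch, not part of the original source: the overflow
   claim above, for the unsigned case -- one spare high-order zero bit in
   each addend guarantees that a 32-bit sum does not wrap.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t a = 0x7fffffffu, b = 0x7fffffffu;	/* one spare bit each */
  uint64_t wide = (uint64_t) a + b;
  assert (wide <= UINT32_MAX);			/* no wrap-around */
  return 0;
}
#endif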
9994 
9995 /* This function is called from `simplify_shift_const' to merge two
9996    outer operations.  Specifically, we have already found that we need
9997    to perform operation *POP0 with constant *PCONST0 at the outermost
9998    position.  We would now like to also perform OP1 with constant CONST1
9999    (with *POP0 being done last).
10000 
10001    Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10002    the resulting operation.  *PCOMP_P is set to 1 if we would need to
10003    complement the innermost operand, otherwise it is unchanged.
10004 
10005    MODE is the mode in which the operation will be done.  No bits outside
10006    the width of this mode matter.  It is assumed that the width of this mode
10007    is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10008 
10009    If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG, PLUS,
10010    IOR, XOR, and AND are supported.  We may set *POP0 to SET if the proper
10011    result is simply *PCONST0.
10012 
10013    If the resulting operation cannot be expressed as one operation, we
10014    return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
10015 
10016 static int
10017 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10018 {
10019   enum rtx_code op0 = *pop0;
10020   HOST_WIDE_INT const0 = *pconst0;
10021 
10022   const0 &= GET_MODE_MASK (mode);
10023   const1 &= GET_MODE_MASK (mode);
10024 
10025   /* If OP0 is an AND, clear unimportant bits in CONST1.  */
10026   if (op0 == AND)
10027     const1 &= const0;
10028 
10029   /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same or
10030      if OP0 is SET.  */
10031 
10032   if (op1 == UNKNOWN || op0 == SET)
10033     return 1;
10034 
10035   else if (op0 == UNKNOWN)
10036     op0 = op1, const0 = const1;
10037 
10038   else if (op0 == op1)
10039     {
10040       switch (op0)
10041 	{
10042 	case AND:
10043 	  const0 &= const1;
10044 	  break;
10045 	case IOR:
10046 	  const0 |= const1;
10047 	  break;
10048 	case XOR:
10049 	  const0 ^= const1;
10050 	  break;
10051 	case PLUS:
10052 	  const0 += const1;
10053 	  break;
10054 	case NEG:
10055 	  op0 = UNKNOWN;
10056 	  break;
10057 	default:
10058 	  break;
10059 	}
10060     }
10061 
10062   /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
10063   else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10064     return 0;
10065 
10066   /* If the two constants aren't the same, we can't do anything.  The
10067      remaining six cases can all be done.  */
10068   else if (const0 != const1)
10069     return 0;
10070 
10071   else
10072     switch (op0)
10073       {
10074       case IOR:
10075 	if (op1 == AND)
10076 	  /* (a & b) | b == b */
10077 	  op0 = SET;
10078 	else /* op1 == XOR */
10079 	  /* (a ^ b) | b == a | b */
10080 	  {;}
10081 	break;
10082 
10083       case XOR:
10084 	if (op1 == AND)
10085 	  /* (a & b) ^ b == (~a) & b */
10086 	  op0 = AND, *pcomp_p = 1;
10087 	else /* op1 == IOR */
10088 	  /* (a | b) ^ b == a & ~b */
10089 	  op0 = AND, const0 = ~const0;
10090 	break;
10091 
10092       case AND:
10093 	if (op1 == IOR)
10094 	  /* (a | b) & b == b */
10095 	  op0 = SET;
10096 	else /* op1 == XOR */
10097 	  /* (a ^ b) & b == (~a) & b */
10098 	  *pcomp_p = 1;
10099 	break;
10100       default:
10101 	break;
10102       }
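
  /* Illustrative sketch, not part of the original source: exhaustively
     checks the six identities relied on in the switch above, using 8-bit
     values so the loops stay small.  */
#if 0
#include <assert.h>

int
main (void)
{
  for (unsigned a = 0; a < 256; a++)
    for (unsigned b = 0; b < 256; b++)
      {
	assert (((a & b) | b) == b);		/* (a & b) | b == b */
	assert (((a ^ b) | b) == (a | b));	/* (a ^ b) | b == a | b */
	assert (((a & b) ^ b) == (~a & b));	/* (a & b) ^ b == (~a) & b */
	assert (((a | b) ^ b) == (a & ~b));	/* (a | b) ^ b == a & ~b */
	assert (((a | b) & b) == b);		/* (a | b) & b == b */
	assert (((a ^ b) & b) == (~a & b));	/* (a ^ b) & b == (~a) & b */
      }
  return 0;
}
#endif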
10103 
10104   /* Check for NO-OP cases.  */
10105   const0 &= GET_MODE_MASK (mode);
10106   if (const0 == 0
10107       && (op0 == IOR || op0 == XOR || op0 == PLUS))
10108     op0 = UNKNOWN;
10109   else if (const0 == 0 && op0 == AND)
10110     op0 = SET;
10111   else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10112 	   && op0 == AND)
10113     op0 = UNKNOWN;
10114 
10115   *pop0 = op0;
10116 
10117   /* ??? Slightly redundant with the above mask, but not entirely.
10118      Moving this above means we'd have to sign-extend the mode mask
10119      for the final test.  */
10120   if (op0 != UNKNOWN && op0 != NEG)
10121     *pconst0 = trunc_int_for_mode (const0, mode);
10122 
10123   return 1;
10124 }
10125 
10126 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10127    the shift in.  The original shift operation CODE is performed on OP in
10128    ORIG_MODE.  Return the wider mode MODE if we can perform the operation
10129    in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
10130    result of the shift is subject to operation OUTER_CODE with operand
10131    OUTER_CONST.  */
10132 
10133 static machine_mode
10134 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10135 		      machine_mode orig_mode, machine_mode mode,
10136 		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10137 {
10138   if (orig_mode == mode)
10139     return mode;
10140   gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10141 
10142   /* In general we can't perform in wider mode for right shift and rotate.  */
10143   switch (code)
10144     {
10145     case ASHIFTRT:
10146       /* We can still widen if the bits brought in from the left are identical
10147 	 to the sign bit of ORIG_MODE.  */
10148       if (num_sign_bit_copies (op, mode)
10149 	  > (unsigned) (GET_MODE_PRECISION (mode)
10150 			- GET_MODE_PRECISION (orig_mode)))
10151 	return mode;
10152       return orig_mode;
10153 
10154     case LSHIFTRT:
10155       /* Similarly here but with zero bits.  */
10156       if (HWI_COMPUTABLE_MODE_P (mode)
10157 	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10158 	return mode;
10159 
10160       /* We can also widen if the bits brought in will be masked off.  This
10161 	 operation is performed in ORIG_MODE.  */
10162       if (outer_code == AND)
10163 	{
10164 	  int care_bits = low_bitmask_len (orig_mode, outer_const);
10165 
10166 	  if (care_bits >= 0
10167 	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10168 	    return mode;
10169 	}
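
      /* Illustrative sketch, not part of the original source: a logical
	 right shift may be done in a wider mode when the outer AND keeps
	 only CARE_BITS low bits and CARE_BITS + COUNT fits in the narrow
	 mode, since the extra bits dragged in are masked away.  uint16_t
	 and uint32_t stand in for ORIG_MODE and MODE.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint16_t narrow = 0xbeef;
  uint32_t widened = 0xdead0000u | narrow;	/* arbitrary upper half */
  unsigned care_bits = 6, count = 5;		/* 6 + 5 <= 16 */
  uint32_t m = (1u << care_bits) - 1;
  assert (((widened >> count) & m) == (uint32_t) ((narrow >> count) & m));
  return 0;
}
#endif
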
10170       /* fall through */
10171 
10172     case ROTATE:
10173       return orig_mode;
10174 
10175     case ROTATERT:
10176       gcc_unreachable ();
10177 
10178     default:
10179       return mode;
10180     }
10181 }
10182 
10183 /* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
10184    of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
10185    if we cannot simplify it.  Otherwise, return a simplified value.
10186 
10187    The shift is normally computed in the widest mode we find in VAROP, as
10188    long as it isn't a different number of words than RESULT_MODE.  Exceptions
10189    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
10190 
10191 static rtx
10192 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10193 			rtx varop, int orig_count)
10194 {
10195   enum rtx_code orig_code = code;
10196   rtx orig_varop = varop;
10197   int count;
10198   machine_mode mode = result_mode;
10199   machine_mode shift_mode, tmode;
10200   unsigned int mode_words
10201     = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10202   /* We form (outer_op (code varop count) (outer_const)).  */
10203   enum rtx_code outer_op = UNKNOWN;
10204   HOST_WIDE_INT outer_const = 0;
10205   int complement_p = 0;
10206   rtx new_rtx, x;
10207 
10208   /* Make sure to truncate the "natural" shift on the way in.  We don't
10209      want to do this inside the loop as it makes it more difficult to
10210      combine shifts.  */
10211   if (SHIFT_COUNT_TRUNCATED)
10212     orig_count &= GET_MODE_BITSIZE (mode) - 1;
10213 
10214   /* If we were given an invalid count, don't do anything except exactly
10215      what was requested.  */
10216 
10217   if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
10218     return NULL_RTX;
10219 
10220   count = orig_count;
10221 
10222   /* Unless one of the branches of the `if' in this loop does a `continue',
10223      we will `break' the loop after the `if'.  */
10224 
10225   while (count != 0)
10226     {
10227       /* If we have an operand of (clobber (const_int 0)), fail.  */
10228       if (GET_CODE (varop) == CLOBBER)
10229 	return NULL_RTX;
10230 
10231       /* Convert ROTATERT to ROTATE.  */
10232       if (code == ROTATERT)
10233 	{
10234 	  unsigned int bitsize = GET_MODE_PRECISION (result_mode);
10235 	  code = ROTATE;
10236 	  if (VECTOR_MODE_P (result_mode))
10237 	    count = bitsize / GET_MODE_NUNITS (result_mode) - count;
10238 	  else
10239 	    count = bitsize - count;
10240 	}
10241 
10242       shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
10243 					 mode, outer_op, outer_const);
10244 
10245       /* Handle cases where the count is greater than the size of the mode
10246 	 minus 1.  For ASHIFT, use the size minus one as the count (this can
10247 	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
10248 	 take the count modulo the size.  For other shifts, the result is
10249 	 zero.
10250 
10251 	 Since these shifts are being produced by the compiler by combining
10252 	 multiple operations, each of which are defined, we know what the
10253 	 result is supposed to be.  */
10254 
10255       if (count > (GET_MODE_PRECISION (shift_mode) - 1))
10256 	{
10257 	  if (code == ASHIFTRT)
10258 	    count = GET_MODE_PRECISION (shift_mode) - 1;
10259 	  else if (code == ROTATE || code == ROTATERT)
10260 	    count %= GET_MODE_PRECISION (shift_mode);
10261 	  else
10262 	    {
10263 	      /* We can't simply return zero because there may be an
10264 		 outer op.  */
10265 	      varop = const0_rtx;
10266 	      count = 0;
10267 	      break;
10268 	    }
10269 	}
10270 
10271       /* If we discovered we had to complement VAROP, leave.  Making a NOT
10272 	 here would cause an infinite loop.  */
10273       if (complement_p)
10274 	break;
10275 
10276       /* An arithmetic right shift of a quantity known to be -1 or 0
10277 	 is a no-op.  */
10278       if (code == ASHIFTRT
10279 	  && (num_sign_bit_copies (varop, shift_mode)
10280 	      == GET_MODE_PRECISION (shift_mode)))
10281 	{
10282 	  count = 0;
10283 	  break;
10284 	}
10285 
10286       /* If we are doing an arithmetic right shift and discarding all but
10287 	 the sign bit copies, this is equivalent to doing a shift by the
10288 	 bitsize minus one.  Convert it into that shift because it will often
10289 	 allow other simplifications.  */
10290 
10291       if (code == ASHIFTRT
10292 	  && (count + num_sign_bit_copies (varop, shift_mode)
10293 	      >= GET_MODE_PRECISION (shift_mode)))
10294 	count = GET_MODE_PRECISION (shift_mode) - 1;
10295 
10296       /* We simplify the tests below and elsewhere by converting
10297 	 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10298 	 `make_compound_operation' will convert it to an ASHIFTRT for
10299 	 those machines (such as VAX) that don't have an LSHIFTRT.  */
10300       if (code == ASHIFTRT
10301 	  && val_signbit_known_clear_p (shift_mode,
10302 					nonzero_bits (varop, shift_mode)))
10303 	code = LSHIFTRT;
10304 
10305       if (((code == LSHIFTRT
10306 	    && HWI_COMPUTABLE_MODE_P (shift_mode)
10307 	    && !(nonzero_bits (varop, shift_mode) >> count))
10308 	   || (code == ASHIFT
10309 	       && HWI_COMPUTABLE_MODE_P (shift_mode)
10310 	       && !((nonzero_bits (varop, shift_mode) << count)
10311 		    & GET_MODE_MASK (shift_mode))))
10312 	  && !side_effects_p (varop))
10313 	varop = const0_rtx;
10314 
10315       switch (GET_CODE (varop))
10316 	{
10317 	case SIGN_EXTEND:
10318 	case ZERO_EXTEND:
10319 	case SIGN_EXTRACT:
10320 	case ZERO_EXTRACT:
10321 	  new_rtx = expand_compound_operation (varop);
10322 	  if (new_rtx != varop)
10323 	    {
10324 	      varop = new_rtx;
10325 	      continue;
10326 	    }
10327 	  break;
10328 
10329 	case MEM:
10330 	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10331 	     minus the width of a smaller mode, we can do this with a
10332 	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
10333 	  if ((code == ASHIFTRT || code == LSHIFTRT)
10334 	      && ! mode_dependent_address_p (XEXP (varop, 0),
10335 					     MEM_ADDR_SPACE (varop))
10336 	      && ! MEM_VOLATILE_P (varop)
10337 	      && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
10338 					 MODE_INT, 1)) != BLKmode)
10339 	    {
10340 	      new_rtx = adjust_address_nv (varop, tmode,
10341 				       BYTES_BIG_ENDIAN ? 0
10342 				       : count / BITS_PER_UNIT);
10343 
10344 	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10345 				     : ZERO_EXTEND, mode, new_rtx);
10346 	      count = 0;
10347 	      continue;
10348 	    }
10349 	  break;
10350 
10351 	case SUBREG:
10352 	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
10353 	     the same number of words as what we've seen so far.  Then store
10354 	     the widest mode in MODE.  */
10355 	  if (subreg_lowpart_p (varop)
10356 	      && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10357 		  > GET_MODE_SIZE (GET_MODE (varop)))
10358 	      && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10359 				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10360 		 == mode_words
10361 	      && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
10362 	      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
10363 	    {
10364 	      varop = SUBREG_REG (varop);
10365 	      if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
10366 		mode = GET_MODE (varop);
10367 	      continue;
10368 	    }
10369 	  break;
10370 
10371 	case MULT:
10372 	  /* Some machines use MULT instead of ASHIFT because MULT
10373 	     is cheaper.  But it is still better on those machines to
10374 	     merge two shifts into one.  */
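	  /* For example, (mult:SI X (const_int 8)) is rewritten as
	     (ashift:SI X (const_int 3)).  */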
10375 	  if (CONST_INT_P (XEXP (varop, 1))
10376 	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10377 	    {
10378 	      varop
10379 		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
10380 				       XEXP (varop, 0),
10381 				       GEN_INT (exact_log2 (
10382 						UINTVAL (XEXP (varop, 1)))));
10383 	      continue;
10384 	    }
10385 	  break;
10386 
10387 	case UDIV:
10388 	  /* Similar, for when divides are cheaper.  */
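	  /* For example, (udiv:SI X (const_int 8)) is rewritten as
	     (lshiftrt:SI X (const_int 3)).  */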
10389 	  if (CONST_INT_P (XEXP (varop, 1))
10390 	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10391 	    {
10392 	      varop
10393 		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10394 				       XEXP (varop, 0),
10395 				       GEN_INT (exact_log2 (
10396 						UINTVAL (XEXP (varop, 1)))));
10397 	      continue;
10398 	    }
10399 	  break;
10400 
10401 	case ASHIFTRT:
10402 	  /* If we are extracting just the sign bit of an arithmetic
10403 	     right shift, that shift is not needed.  However, the sign
10404 	     bit of a wider mode may be different from what would be
10405 	     interpreted as the sign bit in a narrower mode, so, if
10406 	     the result is narrower, don't discard the shift.  */
10407 	  if (code == LSHIFTRT
10408 	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
10409 	      && (GET_MODE_BITSIZE (result_mode)
10410 		  >= GET_MODE_BITSIZE (GET_MODE (varop))))
10411 	    {
10412 	      varop = XEXP (varop, 0);
10413 	      continue;
10414 	    }
10415 
10416 	  /* ... fall through ...  */
10417 
10418 	case LSHIFTRT:
10419 	case ASHIFT:
10420 	case ROTATE:
10421 	  /* Here we have two nested shifts.  The result is usually the
10422 	     AND of a new shift with a mask.  We compute the result below.  */
10423 	  if (CONST_INT_P (XEXP (varop, 1))
10424 	      && INTVAL (XEXP (varop, 1)) >= 0
10425 	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
10426 	      && HWI_COMPUTABLE_MODE_P (result_mode)
10427 	      && HWI_COMPUTABLE_MODE_P (mode)
10428 	      && !VECTOR_MODE_P (result_mode))
10429 	    {
10430 	      enum rtx_code first_code = GET_CODE (varop);
10431 	      unsigned int first_count = INTVAL (XEXP (varop, 1));
10432 	      unsigned HOST_WIDE_INT mask;
10433 	      rtx mask_rtx;
10434 
10435 	      /* We have one common special case.  We can't do any merging if
10436 		 the inner code is an ASHIFTRT of a smaller mode.  However, if
10437 		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10438 		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10439 		 we can convert it to
10440 		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10441 		 This simplifies certain SIGN_EXTEND operations.  */
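	      /* For example, with M1 == SImode and M2 == HImode (so C2 == 16)
		 and C1 == 4, C3 is the M1 mask with its low four bits
		 cleared, i.e. 0xfffffff0.  */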
10442 	      if (code == ASHIFT && first_code == ASHIFTRT
10443 		  && count == (GET_MODE_PRECISION (result_mode)
10444 			       - GET_MODE_PRECISION (GET_MODE (varop))))
10445 		{
10446 		  /* C3 has the low-order C1 bits zero.  */
10447 
10448 		  mask = GET_MODE_MASK (mode)
10449 			 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
10450 
10451 		  varop = simplify_and_const_int (NULL_RTX, result_mode,
10452 						  XEXP (varop, 0), mask);
10453 		  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10454 						varop, count);
10455 		  count = first_count;
10456 		  code = ASHIFTRT;
10457 		  continue;
10458 		}
10459 
10460 	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10461 		 than C1 high-order bits equal to the sign bit, we can convert
10462 		 this to either an ASHIFT or an ASHIFTRT depending on the
10463 		 two counts.
10464 
10465 		 We cannot do this if VAROP's mode is not SHIFT_MODE.  */
10466 
10467 	      if (code == ASHIFTRT && first_code == ASHIFT
10468 		  && GET_MODE (varop) == shift_mode
10469 		  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10470 		      > first_count))
10471 		{
10472 		  varop = XEXP (varop, 0);
10473 		  count -= first_count;
10474 		  if (count < 0)
10475 		    {
10476 		      count = -count;
10477 		      code = ASHIFT;
10478 		    }
10479 
10480 		  continue;
10481 		}
10482 
10483 	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
10484 		 we can only do this if FIRST_CODE is also ASHIFTRT.
10485 
10486 		 We can't do the case when CODE is ROTATE and FIRST_CODE is
10487 		 ASHIFTRT.
10488 
10489 		 If the mode of this shift is not the mode of the outer shift,
10490 		 we can't do this if either shift is a right shift or ROTATE.
10491 
10492 		 Finally, we can't do any of these if the mode is too wide
10493 		 unless the codes are the same.
10494 
10495 		 Handle the case where the shift codes are the same
10496 		 first.  */
10497 
10498 	      if (code == first_code)
10499 		{
10500 		  if (GET_MODE (varop) != result_mode
10501 		      && (code == ASHIFTRT || code == LSHIFTRT
10502 			  || code == ROTATE))
10503 		    break;
10504 
10505 		  count += first_count;
10506 		  varop = XEXP (varop, 0);
10507 		  continue;
10508 		}
10509 
10510 	      if (code == ASHIFTRT
10511 		  || (code == ROTATE && first_code == ASHIFTRT)
10512 		  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10513 		  || (GET_MODE (varop) != result_mode
10514 		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
10515 			  || first_code == ROTATE
10516 			  || code == ROTATE)))
10517 		break;
10518 
10519 	      /* To compute the mask to apply after the shift, shift the
10520 		 nonzero bits of the inner shift the same way the
10521 		 outer shift will.  */
10522 
10523 	      mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
10524 				       result_mode);
10525 
10526 	      mask_rtx
10527 		= simplify_const_binary_operation (code, result_mode, mask_rtx,
10528 						   GEN_INT (count));
10529 
10530 	      /* Give up if we can't compute an outer operation to use.  */
10531 	      if (mask_rtx == 0
10532 		  || !CONST_INT_P (mask_rtx)
10533 		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
10534 					INTVAL (mask_rtx),
10535 					result_mode, &complement_p))
10536 		break;
10537 
10538 	      /* If the shifts are in the same direction, we add the
10539 		 counts.  Otherwise, we subtract them.  */
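	      /* For example, (lshiftrt:SI (lshiftrt:SI X 2) 3) adds up to
		 (lshiftrt:SI X 5), the AND mask computed above being
		 redundant in that case.  Mixing an ASHIFT with an LSHIFTRT
		 subtracts the counts instead and relies on the mask.  */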
10540 	      if ((code == ASHIFTRT || code == LSHIFTRT)
10541 		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10542 		count += first_count;
10543 	      else
10544 		count -= first_count;
10545 
10546 	      /* If COUNT is positive, the new shift is usually CODE,
10547 		 except for the two exceptions below, in which case it is
10548 		 FIRST_CODE.  If the count is negative, FIRST_CODE should
10549 		 always be used.  */
10550 	      if (count > 0
10551 		  && ((first_code == ROTATE && code == ASHIFT)
10552 		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
10553 		code = first_code;
10554 	      else if (count < 0)
10555 		code = first_code, count = -count;
10556 
10557 	      varop = XEXP (varop, 0);
10558 	      continue;
10559 	    }
10560 
10561 	  /* If we have (A << B << C) for any shift, we can convert this to
10562 	     (A << C << B).  This wins if A is a constant.  Only try this if
10563 	     B is not a constant.  */
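	  /* For example, (ashift:SI (const_int 3) B) shifted left by a
	     further 4 becomes (ashift:SI (const_int 48) B).  */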
10564 
10565 	  else if (GET_CODE (varop) == code
10566 		   && CONST_INT_P (XEXP (varop, 0))
10567 		   && !CONST_INT_P (XEXP (varop, 1)))
10568 	    {
10569 	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10570 		 sure the result will be masked.  See PR70222.  */
10571 	      if (code == LSHIFTRT
10572 		  && mode != result_mode
10573 		  && !merge_outer_ops (&outer_op, &outer_const, AND,
10574 				       GET_MODE_MASK (result_mode)
10575 				       >> orig_count, result_mode,
10576 				       &complement_p))
10577 		break;
10578 	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
10579 		 up outer sign extension (often left and right shift) is
10580 		 hardly more efficient than the original.  See PR70429.  */
10581 	      if (code == ASHIFTRT && mode != result_mode)
10582 		break;
10583 
10584 	      rtx new_rtx = simplify_const_binary_operation (code, mode,
10585 							     XEXP (varop, 0),
10586 							     GEN_INT (count));
10587 	      varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10588 	      count = 0;
10589 	      continue;
10590 	    }
10591 	  break;
10592 
10593 	case NOT:
10594 	  if (VECTOR_MODE_P (mode))
10595 	    break;
10596 
10597 	  /* Make this fit the case below.  */
10598 	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10599 	  continue;
10600 
10601 	case IOR:
10602 	case AND:
10603 	case XOR:
10604 	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10605 	     with C the size of VAROP - 1 and the shift is logical if
10606 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10607 	     we have an (le X 0) operation.   If we have an arithmetic shift
10608 	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
10609 	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */
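	  /* For example, with a logical shift and STORE_FLAG_VALUE == 1:
	     if X is zero the IOR is -1 and shifting right by the width
	     minus 1 yields 1; if X is positive neither X - 1 nor X has
	     the sign bit set, so the result is 0; if X is negative the
	     sign bit of X is set and the result is 1.  Thus the result
	     is 1 exactly when X <= 0, i.e. (le X 0).  */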
10610 
10611 	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10612 	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10613 	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10614 	      && (code == LSHIFTRT || code == ASHIFTRT)
10615 	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10616 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10617 	    {
10618 	      count = 0;
10619 	      varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10620 				  const0_rtx);
10621 
10622 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10623 		varop = gen_rtx_NEG (GET_MODE (varop), varop);
10624 
10625 	      continue;
10626 	    }
10627 
10628 	  /* If we have (shift (logical)), move the logical to the outside
10629 	     to allow it to possibly combine with another logical and the
10630 	     shift to combine with another shift.  This also canonicalizes to
10631 	     what a ZERO_EXTRACT looks like.  Also, some machines have
10632 	     (and (shift)) insns.  */
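	  /* For example, (ashift:SI (ior:SI X (const_int 3)) 8) becomes
	     (ashift:SI X 8) with an outer (ior ... (const_int 0x300)).  */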
10633 
10634 	  if (CONST_INT_P (XEXP (varop, 1))
10635 	      /* We can't do this if we have (ashiftrt (xor))  and the
10636 		 constant has its sign bit set in shift_mode with shift_mode
10637 		 wider than result_mode.  */
10638 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10639 		   && result_mode != shift_mode
10640 		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10641 					      shift_mode))
10642 	      && (new_rtx = simplify_const_binary_operation
10643 		  (code, result_mode,
10644 		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10645 		   GEN_INT (count))) != 0
10646 	      && CONST_INT_P (new_rtx)
10647 	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10648 				  INTVAL (new_rtx), result_mode, &complement_p))
10649 	    {
10650 	      varop = XEXP (varop, 0);
10651 	      continue;
10652 	    }
10653 
10654 	  /* If we can't do that, try to simplify the shift in each arm of the
10655 	     logical expression, make a new logical expression, and apply
10656 	     the inverse distributive law.  This also can't be done for
10657 	     (ashiftrt (xor)) where we've widened the shift and the constant
10658 	     changes the sign bit.  */
10659 	  if (CONST_INT_P (XEXP (varop, 1))
10660 	     && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10661 		  && result_mode != shift_mode
10662 		  && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10663 					     shift_mode)))
10664 	    {
10665 	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10666 					      XEXP (varop, 0), count);
10667 	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10668 					      XEXP (varop, 1), count);
10669 
10670 	      varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10671 					   lhs, rhs);
10672 	      varop = apply_distributive_law (varop);
10673 
10674 	      count = 0;
10675 	      continue;
10676 	    }
10677 	  break;
10678 
10679 	case EQ:
10680 	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10681 	     says that the sign bit can be tested, FOO has mode MODE, C is
10682 	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10683 	     that may be nonzero.  */
10684 	  if (code == LSHIFTRT
10685 	      && XEXP (varop, 1) == const0_rtx
10686 	      && GET_MODE (XEXP (varop, 0)) == result_mode
10687 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10688 	      && HWI_COMPUTABLE_MODE_P (result_mode)
10689 	      && STORE_FLAG_VALUE == -1
10690 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10691 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10692 				  &complement_p))
10693 	    {
10694 	      varop = XEXP (varop, 0);
10695 	      count = 0;
10696 	      continue;
10697 	    }
10698 	  break;
10699 
10700 	case NEG:
10701 	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10702 	     than the number of bits in the mode is equivalent to A.  */
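	  /* For example, if A is 1 then (neg A) is all ones and the
	     logical shift by the width minus 1 leaves just 1; if A is 0
	     the result is 0.  Either way the result is A.  */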
10703 	  if (code == LSHIFTRT
10704 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10705 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10706 	    {
10707 	      varop = XEXP (varop, 0);
10708 	      count = 0;
10709 	      continue;
10710 	    }
10711 
10712 	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
10713 	     NEG outside to allow shifts to combine.  */
10714 	  if (code == ASHIFT
10715 	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10716 				  &complement_p))
10717 	    {
10718 	      varop = XEXP (varop, 0);
10719 	      continue;
10720 	    }
10721 	  break;
10722 
10723 	case PLUS:
10724 	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10725 	     is one less than the number of bits in the mode is
10726 	     equivalent to (xor A 1).  */
10727 	  if (code == LSHIFTRT
10728 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10729 	      && XEXP (varop, 1) == constm1_rtx
10730 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10731 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10732 				  &complement_p))
10733 	    {
10734 	      count = 0;
10735 	      varop = XEXP (varop, 0);
10736 	      continue;
10737 	    }
10738 
10739 	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10740 	     that might be nonzero in BAR are those being shifted out and those
10741 	     bits are known zero in FOO, we can replace the PLUS with FOO.
10742 	     Similarly in the other operand order.  This code occurs when
10743 	     we are computing the size of a variable-size array.  */
10744 
10745 	  if ((code == ASHIFTRT || code == LSHIFTRT)
10746 	      && count < HOST_BITS_PER_WIDE_INT
10747 	      && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10748 	      && (nonzero_bits (XEXP (varop, 1), result_mode)
10749 		  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10750 	    {
10751 	      varop = XEXP (varop, 0);
10752 	      continue;
10753 	    }
10754 	  else if ((code == ASHIFTRT || code == LSHIFTRT)
10755 		   && count < HOST_BITS_PER_WIDE_INT
10756 		   && HWI_COMPUTABLE_MODE_P (result_mode)
10757 		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10758 			    >> count)
10759 		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10760 			    & nonzero_bits (XEXP (varop, 1),
10761 						 result_mode)))
10762 	    {
10763 	      varop = XEXP (varop, 1);
10764 	      continue;
10765 	    }
10766 
10767 	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
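	  /* C' is C shifted left by N; e.g. (ashift:SI (plus:SI X
	     (const_int 5)) 2) becomes (plus:SI (ashift:SI X 2)
	     (const_int 20)).  */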
10768 	  if (code == ASHIFT
10769 	      && CONST_INT_P (XEXP (varop, 1))
10770 	      && (new_rtx = simplify_const_binary_operation
10771 		  (ASHIFT, result_mode,
10772 		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10773 		   GEN_INT (count))) != 0
10774 	      && CONST_INT_P (new_rtx)
10775 	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
10776 				  INTVAL (new_rtx), result_mode, &complement_p))
10777 	    {
10778 	      varop = XEXP (varop, 0);
10779 	      continue;
10780 	    }
10781 
10782 	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10783 	     signbit', and attempt to change the PLUS to an XOR and move it to
10784 	     the outer operation as is done above in the AND/IOR/XOR case
10785 		     leg for (shift (logical)).  See the comments in the logical
10786 		     handling above for the reasoning behind doing so.  */
10787 	  if (code == LSHIFTRT
10788 	      && CONST_INT_P (XEXP (varop, 1))
10789 	      && mode_signbit_p (result_mode, XEXP (varop, 1))
10790 	      && (new_rtx = simplify_const_binary_operation
10791 		  (code, result_mode,
10792 		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10793 		   GEN_INT (count))) != 0
10794 	      && CONST_INT_P (new_rtx)
10795 	      && merge_outer_ops (&outer_op, &outer_const, XOR,
10796 				  INTVAL (new_rtx), result_mode, &complement_p))
10797 	    {
10798 	      varop = XEXP (varop, 0);
10799 	      continue;
10800 	    }
10801 
10802 	  break;
10803 
10804 	case MINUS:
10805 	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10806 	     with C the size of VAROP - 1 and the shift is logical if
10807 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10808 	     we have a (gt X 0) operation.  If the shift is arithmetic with
10809 	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10810 	     we have a (neg (gt X 0)) operation.  */
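	  /* For example, with a logical shift and STORE_FLAG_VALUE == 1:
	     (ashiftrt X C) is 0 for X >= 0 and -1 for X < 0, so the MINUS
	     is -X when X > 0, 0 when X is 0, and -(X + 1) when X < 0.
	     Only the first of these has the sign bit set, so shifting it
	     right by C yields exactly (gt X 0).  */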
10811 
10812 	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10813 	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10814 	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10815 	      && (code == LSHIFTRT || code == ASHIFTRT)
10816 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10817 	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10818 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10819 	    {
10820 	      count = 0;
10821 	      varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10822 				  const0_rtx);
10823 
10824 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10825 		varop = gen_rtx_NEG (GET_MODE (varop), varop);
10826 
10827 	      continue;
10828 	    }
10829 	  break;
10830 
10831 	case TRUNCATE:
10832 	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10833 	     if the truncate does not affect the value.  */
10834 	  if (code == LSHIFTRT
10835 	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10836 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10837 	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
10838 		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
10839 		      - GET_MODE_PRECISION (GET_MODE (varop)))))
10840 	    {
10841 	      rtx varop_inner = XEXP (varop, 0);
10842 
10843 	      varop_inner
10844 		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10845 				    XEXP (varop_inner, 0),
10846 				    GEN_INT
10847 				    (count + INTVAL (XEXP (varop_inner, 1))));
10848 	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10849 	      count = 0;
10850 	      continue;
10851 	    }
10852 	  break;
10853 
10854 	default:
10855 	  break;
10856 	}
10857 
10858       break;
10859     }
10860 
10861   shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10862 				     outer_op, outer_const);
10863 
10864   /* We have now finished analyzing the shift.  The result should be
10865      a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
10866      OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10867      to the result of the shift.  OUTER_CONST is the relevant constant,
10868      but we must turn off all bits turned off in the shift.  */
10869 
10870   if (outer_op == UNKNOWN
10871       && orig_code == code && orig_count == count
10872       && varop == orig_varop
10873       && shift_mode == GET_MODE (varop))
10874     return NULL_RTX;
10875 
10876   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
10877   varop = gen_lowpart (shift_mode, varop);
10878   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10879     return NULL_RTX;
10880 
10881   /* If we have an outer operation and we just made a shift, it is
10882      possible that we could have simplified the shift were it not
10883      for the outer operation.  So try to do the simplification
10884      recursively.  */
10885 
10886   if (outer_op != UNKNOWN)
10887     x = simplify_shift_const_1 (code, shift_mode, varop, count);
10888   else
10889     x = NULL_RTX;
10890 
10891   if (x == NULL_RTX)
10892     x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10893 
10894   /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10895      turn off all the bits that the shift would have turned off.  */
10896   if (orig_code == LSHIFTRT && result_mode != shift_mode)
10897     x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10898 				GET_MODE_MASK (result_mode) >> orig_count);
10899 
10900   /* Do the remainder of the processing in RESULT_MODE.  */
10901   x = gen_lowpart_or_truncate (result_mode, x);
10902 
10903   /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10904      operation.  */
10905   if (complement_p)
10906     x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10907 
10908   if (outer_op != UNKNOWN)
10909     {
10910       if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10911 	  && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
10912 	outer_const = trunc_int_for_mode (outer_const, result_mode);
10913 
10914       if (outer_op == AND)
10915 	x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10916       else if (outer_op == SET)
10917 	{
10918 	  /* This means that we have determined that the result is
10919 	     equivalent to a constant.  This should be rare.  */
10920 	  if (!side_effects_p (x))
10921 	    x = GEN_INT (outer_const);
10922 	}
10923       else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10924 	x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10925       else
10926 	x = simplify_gen_binary (outer_op, result_mode, x,
10927 				 GEN_INT (outer_const));
10928     }
10929 
10930   return x;
10931 }
10932 
10933 /* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
10934    The result of the shift is RESULT_MODE.  If we cannot simplify it,
10935    return X or, if it is NULL, synthesize the expression with
10936    simplify_gen_binary.  Otherwise, return a simplified value.
10937 
10938    The shift is normally computed in the widest mode we find in VAROP, as
10939    long as it isn't a different number of words than RESULT_MODE.  Exceptions
10940    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
10941 
10942 static rtx
10943 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
10944 		      rtx varop, int count)
10945 {
10946   rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10947   if (tem)
10948     return tem;
10949 
10950   if (!x)
10951     x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10952   if (GET_MODE (x) != result_mode)
10953     x = gen_lowpart (result_mode, x);
10954   return x;
10955 }
10956 
10957 
10958 /* A subroutine of recog_for_combine.  See there for arguments and
10959    return value.  */
10960 
10961 static int
10962 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
10963 {
10964   rtx pat = *pnewpat;
10965   rtx pat_without_clobbers;
10966   int insn_code_number;
10967   int num_clobbers_to_add = 0;
10968   int i;
10969   rtx notes = NULL_RTX;
10970   rtx old_notes, old_pat;
10971   int old_icode;
10972 
10973   /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10974      we use to indicate that something didn't match.  If we find such a
10975      thing, force rejection.  */
10976   if (GET_CODE (pat) == PARALLEL)
10977     for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10978       if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10979 	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10980 	return -1;
10981 
10982   old_pat = PATTERN (insn);
10983   old_notes = REG_NOTES (insn);
10984   PATTERN (insn) = pat;
10985   REG_NOTES (insn) = NULL_RTX;
10986 
10987   insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10988   if (dump_file && (dump_flags & TDF_DETAILS))
10989     {
10990       if (insn_code_number < 0)
10991 	fputs ("Failed to match this instruction:\n", dump_file);
10992       else
10993 	fputs ("Successfully matched this instruction:\n", dump_file);
10994       print_rtl_single (dump_file, pat);
10995     }
10996 
10997   /* If recognition failed, there is the possibility that we previously had an insn
10998      that clobbered some register as a side effect, but the combined
10999      insn doesn't need to do that.  So try once more without the clobbers
11000      unless this represents an ASM insn.  */
11001 
11002   if (insn_code_number < 0 && ! check_asm_operands (pat)
11003       && GET_CODE (pat) == PARALLEL)
11004     {
11005       int pos;
11006 
11007       for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11008 	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11009 	  {
11010 	    if (i != pos)
11011 	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11012 	    pos++;
11013 	  }
11014 
11015       SUBST_INT (XVECLEN (pat, 0), pos);
11016 
11017       if (pos == 1)
11018 	pat = XVECEXP (pat, 0, 0);
11019 
11020       PATTERN (insn) = pat;
11021       insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11022       if (dump_file && (dump_flags & TDF_DETAILS))
11023 	{
11024 	  if (insn_code_number < 0)
11025 	    fputs ("Failed to match this instruction:\n", dump_file);
11026 	  else
11027 	    fputs ("Successfully matched this instruction:\n", dump_file);
11028 	  print_rtl_single (dump_file, pat);
11029 	}
11030     }
11031 
11032   pat_without_clobbers = pat;
11033 
11034   PATTERN (insn) = old_pat;
11035   REG_NOTES (insn) = old_notes;
11036 
11037   /* Recognize all noop sets; these will be killed by a followup pass.  */
11038   if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11039     insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11040 
11041   /* If we had any clobbers to add, make a new pattern that contains
11042      them.  Then check to make sure that all of them are dead.  */
11043   if (num_clobbers_to_add)
11044     {
11045       rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11046 				     rtvec_alloc (GET_CODE (pat) == PARALLEL
11047 						  ? (XVECLEN (pat, 0)
11048 						     + num_clobbers_to_add)
11049 						  : num_clobbers_to_add + 1));
11050 
11051       if (GET_CODE (pat) == PARALLEL)
11052 	for (i = 0; i < XVECLEN (pat, 0); i++)
11053 	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11054       else
11055 	XVECEXP (newpat, 0, 0) = pat;
11056 
11057       add_clobbers (newpat, insn_code_number);
11058 
11059       for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11060 	   i < XVECLEN (newpat, 0); i++)
11061 	{
11062 	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11063 	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11064 	    return -1;
11065 	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11066 	    {
11067 	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11068 	      notes = alloc_reg_note (REG_UNUSED,
11069 				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
11070 	    }
11071 	}
11072       pat = newpat;
11073     }
11074 
11075   if (insn_code_number >= 0
11076       && insn_code_number != NOOP_MOVE_INSN_CODE)
11077     {
11078       old_pat = PATTERN (insn);
11079       old_notes = REG_NOTES (insn);
11080       old_icode = INSN_CODE (insn);
11081       PATTERN (insn) = pat;
11082       REG_NOTES (insn) = notes;
11083 
11084       /* Allow targets to reject combined insn.  */
11085       if (!targetm.legitimate_combined_insn (insn))
11086 	{
11087 	  if (dump_file && (dump_flags & TDF_DETAILS))
11088 	    fputs ("Instruction not appropriate for target.",
11089 		   dump_file);
11090 
11091 	  /* Callers expect recog_for_combine to strip
11092 	     clobbers from the pattern on failure.  */
11093 	  pat = pat_without_clobbers;
11094 	  notes = NULL_RTX;
11095 
11096 	  insn_code_number = -1;
11097 	}
11098 
11099       PATTERN (insn) = old_pat;
11100       REG_NOTES (insn) = old_notes;
11101       INSN_CODE (insn) = old_icode;
11102     }
11103 
11104   *pnewpat = pat;
11105   *pnotes = notes;
11106 
11107   return insn_code_number;
11108 }
11109 
11110 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11111    expressed as an AND and maybe an LSHIFTRT, to that formulation.
11112    Return whether anything was so changed.  */
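/* For example, (zero_extract:SI X (const_int 8) (const_int 4)) becomes,
   when !BITS_BIG_ENDIAN, (and:SI (lshiftrt:SI X (const_int 4))
   (const_int 255)), and (zero_extend:SI (subreg:QI Y 0)) with Y in SImode
   becomes (and:SI Y (const_int 255)).  */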
11113 
11114 static bool
11115 change_zero_ext (rtx *src)
11116 {
11117   bool changed = false;
11118 
11119   subrtx_ptr_iterator::array_type array;
11120   FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11121     {
11122       rtx x = **iter;
11123       machine_mode mode = GET_MODE (x);
11124       int size;
11125 
11126       if (GET_CODE (x) == ZERO_EXTRACT
11127 	  && CONST_INT_P (XEXP (x, 1))
11128 	  && CONST_INT_P (XEXP (x, 2))
11129 	  && GET_MODE (XEXP (x, 0)) == mode)
11130 	{
11131 	  size = INTVAL (XEXP (x, 1));
11132 
11133 	  int start = INTVAL (XEXP (x, 2));
11134 	  if (BITS_BIG_ENDIAN)
11135 	    start = GET_MODE_PRECISION (mode) - size - start;
11136 
11137 	  x = simplify_gen_binary (LSHIFTRT, mode,
11138 				   XEXP (x, 0), GEN_INT (start));
11139 	}
11140       else if (GET_CODE (x) == ZERO_EXTEND
11141 	       && GET_CODE (XEXP (x, 0)) == SUBREG
11142 	       && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
11143 	       && subreg_lowpart_p (XEXP (x, 0)))
11144 	{
11145 	  size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
11146 	  x = SUBREG_REG (XEXP (x, 0));
11147 	}
11148       else
11149 	continue;
11150 
11151       unsigned HOST_WIDE_INT mask = 1;
11152       mask <<= size;
11153       mask--;
11154 
11155       x = gen_rtx_AND (mode, x, GEN_INT (mask));
11156 
11157       SUBST (**iter, x);
11158       changed = true;
11159     }
11160 
11161   return changed;
11162 }
11163 
11164 /* Like recog, but we receive the address of a pointer to a new pattern.
11165    We try to match the rtx that the pointer points to.
11166    If that fails, we may try to modify or replace the pattern,
11167    storing the replacement into the same pointer object.
11168 
11169    Modifications include deletion or addition of CLOBBERs.  If the
11170    instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11171    to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11172    (and undo if that fails).
11173 
11174    PNOTES is a pointer to a location where any REG_UNUSED notes added for
11175    the CLOBBERs are placed.
11176 
11177    The value is the final insn code from the pattern ultimately matched,
11178    or -1.  */
11179 
11180 static int
11181 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11182 {
11183   rtx pat = PATTERN (insn);
11184   int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11185   if (insn_code_number >= 0 || check_asm_operands (pat))
11186     return insn_code_number;
11187 
11188   void *marker = get_undo_marker ();
11189   bool changed = false;
11190 
11191   if (GET_CODE (pat) == SET)
11192     changed = change_zero_ext (&SET_SRC (pat));
11193   else if (GET_CODE (pat) == PARALLEL)
11194     {
11195       int i;
11196       for (i = 0; i < XVECLEN (pat, 0); i++)
11197 	{
11198 	  rtx set = XVECEXP (pat, 0, i);
11199 	  if (GET_CODE (set) == SET)
11200 	    changed |= change_zero_ext (&SET_SRC (set));
11201 	}
11202     }
11203 
11204   if (changed)
11205     {
11206       insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11207 
11208       if (insn_code_number < 0)
11209 	undo_to_marker (marker);
11210     }
11211 
11212   return insn_code_number;
11213 }
11214 
11215 /* Like gen_lowpart_general but for use by combine.  In combine it
11216    is not possible to create any new pseudoregs.  However, it is
11217    safe to create invalid memory addresses, because combine will
11218    try to recognize them and all they will do is make the combine
11219    attempt fail.
11220 
11221    If for some reason this cannot do its job, an rtx
11222    (clobber (const_int 0)) is returned.
11223    An insn containing that will not be recognized.  */
11224 
11225 static rtx
11226 gen_lowpart_for_combine (machine_mode omode, rtx x)
11227 {
11228   machine_mode imode = GET_MODE (x);
11229   unsigned int osize = GET_MODE_SIZE (omode);
11230   unsigned int isize = GET_MODE_SIZE (imode);
11231   rtx result;
11232 
11233   if (omode == imode)
11234     return x;
11235 
11236   /* We can only support MODE being wider than a word if X is a
11237      constant integer or has a mode the same size.  */
11238   if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11239       && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11240     goto fail;
11241 
11242   /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
11243      won't know what to do.  So we will strip off the SUBREG here and
11244      process normally.  */
11245   if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11246     {
11247       x = SUBREG_REG (x);
11248 
11249       /* For use in case we fall down into the address adjustments
11250 	 further below, we need to adjust the known mode and size of
11251 	 x; imode and isize, since we just adjusted x.  */
11252       imode = GET_MODE (x);
11253 
11254       if (imode == omode)
11255 	return x;
11256 
11257       isize = GET_MODE_SIZE (imode);
11258     }
11259 
11260   result = gen_lowpart_common (omode, x);
11261 
11262   if (result)
11263     return result;
11264 
11265   if (MEM_P (x))
11266     {
11267       int offset = 0;
11268 
11269       /* Refuse to work on a volatile memory ref or one with a mode-dependent
11270 	 address.  */
11271       if (MEM_VOLATILE_P (x)
11272 	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11273 	goto fail;
11274 
11275       /* If we want to refer to something bigger than the original memref,
11276 	 generate a paradoxical subreg instead.  That will force a reload
11277 	 of the original memref X.  */
11278       if (isize < osize)
11279 	return gen_rtx_SUBREG (omode, x, 0);
11280 
11281       if (WORDS_BIG_ENDIAN)
11282 	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
11283 
11284       /* Adjust the address so that the address-after-the-data is
11285 	 unchanged.  */
11286       if (BYTES_BIG_ENDIAN)
11287 	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
11288 
11289       return adjust_address_nv (x, omode, offset);
11290     }
11291 
11292   /* If X is a comparison operator, rewrite it in a new mode.  This
11293      probably won't match, but may allow further simplifications.  */
11294   else if (COMPARISON_P (x))
11295     return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11296 
11297   /* If we couldn't simplify X any other way, just enclose it in a
11298      SUBREG.  Normally, this SUBREG won't match, but some patterns may
11299      include an explicit SUBREG or we may simplify it further in combine.  */
11300   else
11301     {
11302       rtx res;
11303 
11304       if (imode == VOIDmode)
11305 	{
11306 	  imode = int_mode_for_mode (omode);
11307 	  x = gen_lowpart_common (imode, x);
11308 	  if (x == NULL)
11309 	    goto fail;
11310 	}
11311       res = lowpart_subreg (omode, x, imode);
11312       if (res)
11313 	return res;
11314     }
11315 
11316  fail:
11317   return gen_rtx_CLOBBER (omode, const0_rtx);
11318 }
11319 
11320 /* Try to simplify a comparison between OP0 and a constant OP1,
11321    where CODE is the comparison code that will be tested, into a
11322    (CODE OP0 const0_rtx) form.
11323 
11324    The result is a possibly different comparison code to use.
11325    *POP1 may be updated.  */
11326 
11327 static enum rtx_code
11328 simplify_compare_const (enum rtx_code code, machine_mode mode,
11329 			rtx op0, rtx *pop1)
11330 {
11331   unsigned int mode_width = GET_MODE_PRECISION (mode);
11332   HOST_WIDE_INT const_op = INTVAL (*pop1);
11333 
11334   /* Get the constant we are comparing against and turn off all bits
11335      not on in our mode.  */
11336   if (mode != VOIDmode)
11337     const_op = trunc_int_for_mode (const_op, mode);
11338 
11339   /* If we are comparing against a constant power of two and the value
11340      being compared can only have that single bit nonzero (e.g., it was
11341      `and'ed with that bit), we can replace this with a comparison
11342      with zero.  */
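  /* For example, if OP0 is (and:SI X (const_int 8)), which can only be
     0 or 8, an EQ comparison with 8 becomes an NE comparison with 0.  */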
11343   if (const_op
11344       && (code == EQ || code == NE || code == GE || code == GEU
11345 	  || code == LT || code == LTU)
11346       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11347       && exact_log2 (const_op & GET_MODE_MASK (mode)) >= 0
11348       && (nonzero_bits (op0, mode)
11349 	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
11350     {
11351       code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11352       const_op = 0;
11353     }
11354 
11355   /* Similarly, if we are comparing a value known to be either -1 or
11356      0 with -1, change it to the opposite comparison against zero.  */
11357   if (const_op == -1
11358       && (code == EQ || code == NE || code == GT || code == LE
11359 	  || code == GEU || code == LTU)
11360       && num_sign_bit_copies (op0, mode) == mode_width)
11361     {
11362       code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11363       const_op = 0;
11364     }
11365 
11366   /* Do some canonicalizations based on the comparison code.  We prefer
11367      comparisons against zero and then prefer equality comparisons.
11368      If we can reduce the size of a constant, we will do that too.  */
11369   switch (code)
11370     {
11371     case LT:
11372       /* < C is equivalent to <= (C - 1) */
11373       if (const_op > 0)
11374 	{
11375 	  const_op -= 1;
11376 	  code = LE;
11377 	  /* ... fall through to LE case below.  */
11378 	}
11379       else
11380 	break;
11381 
11382     case LE:
11383       /* <= C is equivalent to < (C + 1); we do this for C < 0  */
11384       if (const_op < 0)
11385 	{
11386 	  const_op += 1;
11387 	  code = LT;
11388 	}
11389 
11390       /* If we are doing a <= 0 comparison on a value known to have
11391 	 a zero sign bit, we can replace this with == 0.  */
11392       else if (const_op == 0
11393 	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11394 	       && (nonzero_bits (op0, mode)
11395 		   & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11396 	       == 0)
11397 	code = EQ;
11398       break;
11399 
11400     case GE:
11401       /* >= C is equivalent to > (C - 1).  */
11402       if (const_op > 0)
11403 	{
11404 	  const_op -= 1;
11405 	  code = GT;
11406 	  /* ... fall through to GT below.  */
11407 	}
11408       else
11409 	break;
11410 
11411     case GT:
11412       /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
11413       if (const_op < 0)
11414 	{
11415 	  const_op += 1;
11416 	  code = GE;
11417 	}
11418 
11419       /* If we are doing a > 0 comparison on a value known to have
11420 	 a zero sign bit, we can replace this with != 0.  */
11421       else if (const_op == 0
11422 	       && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11423 	       && (nonzero_bits (op0, mode)
11424 		   & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11425 	       == 0)
11426 	code = NE;
11427       break;
11428 
11429     case LTU:
11430       /* < C is equivalent to <= (C - 1).  */
11431       if (const_op > 0)
11432 	{
11433 	  const_op -= 1;
11434 	  code = LEU;
11435 	  /* ... fall through ...  */
11436 	}
11437       /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
11438       else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11439 	       && (unsigned HOST_WIDE_INT) const_op
11440 	       == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11441 	{
11442 	  const_op = 0;
11443 	  code = GE;
11444 	  break;
11445 	}
11446       else
11447 	break;
11448 
11449     case LEU:
11450       /* unsigned <= 0 is equivalent to == 0 */
11451       if (const_op == 0)
11452 	code = EQ;
11453       /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
11454       else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11455 	       && (unsigned HOST_WIDE_INT) const_op
11456 	       == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11457 	{
11458 	  const_op = 0;
11459 	  code = GE;
11460 	}
11461       break;
11462 
11463     case GEU:
11464       /* >= C is equivalent to > (C - 1).  */
11465       if (const_op > 1)
11466 	{
11467 	  const_op -= 1;
11468 	  code = GTU;
11469 	  /* ... fall through ...  */
11470 	}
11471 
11472       /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
11473       else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11474 	       && (unsigned HOST_WIDE_INT) const_op
11475 	       == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11476 	{
11477 	  const_op = 0;
11478 	  code = LT;
11479 	  break;
11480 	}
11481       else
11482 	break;
11483 
11484     case GTU:
11485       /* unsigned > 0 is equivalent to != 0 */
11486       if (const_op == 0)
11487 	code = NE;
11488       /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
11489       else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11490 	       && (unsigned HOST_WIDE_INT) const_op
11491 	       == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11492 	{
11493 	  const_op = 0;
11494 	  code = LT;
11495 	}
11496       break;
11497 
11498     default:
11499       break;
11500     }
11501 
11502   *pop1 = GEN_INT (const_op);
11503   return code;
11504 }
11505 
11506 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11507    comparison code that will be tested.
11508 
11509    The result is a possibly different comparison code to use.  *POP0 and
11510    *POP1 may be updated.
11511 
11512    It is possible that we might detect that a comparison is either always
11513    true or always false.  However, we do not perform general constant
11514    folding in combine, so this knowledge isn't useful.  Such tautologies
11515    should have been detected earlier.  Hence we ignore all such cases.  */
11516 
11517 static enum rtx_code
11518 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11519 {
11520   rtx op0 = *pop0;
11521   rtx op1 = *pop1;
11522   rtx tem, tem1;
11523   int i;
11524   machine_mode mode, tmode;
11525 
11526   /* Try a few ways of applying the same transformation to both operands.  */
11527   while (1)
11528     {
11529       /* The test below this one won't handle SIGN_EXTENDs on these machines,
11530 	 so check specially.  */
11531       if (!WORD_REGISTER_OPERATIONS
11532 	  && code != GTU && code != GEU && code != LTU && code != LEU
11533 	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11534 	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
11535 	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
11536 	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11537 	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11538 	  && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
11539 	      == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
11540 	  && CONST_INT_P (XEXP (op0, 1))
11541 	  && XEXP (op0, 1) == XEXP (op1, 1)
11542 	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11543 	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11544 	  && (INTVAL (XEXP (op0, 1))
11545 	      == (GET_MODE_PRECISION (GET_MODE (op0))
11546 		  - (GET_MODE_PRECISION
11547 		     (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
11548 	{
11549 	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11550 	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11551 	}
11552 
11553       /* If both operands are the same constant shift, see if we can ignore the
11554 	 shift.  We can if the shift is a rotate or if the bits shifted out of
11555 	 this shift are known to be zero for both inputs and if the type of
11556 	 comparison is compatible with the shift.  */
11557       if (GET_CODE (op0) == GET_CODE (op1)
11558 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11559 	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11560 	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11561 		  && (code != GT && code != LT && code != GE && code != LE))
11562 	      || (GET_CODE (op0) == ASHIFTRT
11563 		  && (code != GTU && code != LTU
11564 		      && code != GEU && code != LEU)))
11565 	  && CONST_INT_P (XEXP (op0, 1))
11566 	  && INTVAL (XEXP (op0, 1)) >= 0
11567 	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11568 	  && XEXP (op0, 1) == XEXP (op1, 1))
11569 	{
11570 	  machine_mode mode = GET_MODE (op0);
11571 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11572 	  int shift_count = INTVAL (XEXP (op0, 1));
11573 
11574 	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11575 	    mask &= (mask >> shift_count) << shift_count;
11576 	  else if (GET_CODE (op0) == ASHIFT)
11577 	    mask = (mask & (mask << shift_count)) >> shift_count;
11578 
11579 	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11580 	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11581 	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11582 	  else
11583 	    break;
11584 	}
11585 
11586       /* If both operands are AND's of a paradoxical SUBREG by constant, the
11587 	 SUBREGs are of the same mode, and, in both cases, the AND would
11588 	 be redundant if the comparison was done in the narrower mode,
11589 	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11590 	 and the operand's possibly nonzero bits are 0xffffff01; in that case
11591 	 if we only care about QImode, we don't need the AND).  This case
11592 	 occurs if the output mode of an scc insn is not SImode and
11593 	 STORE_FLAG_VALUE == 1 (e.g., the 386).
11594 
11595 	 Similarly, check for a case where the AND's are ZERO_EXTEND
11596 	 operations from some narrower mode even though a SUBREG is not
11597 	 present.  */
11598 
11599       else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11600 	       && CONST_INT_P (XEXP (op0, 1))
11601 	       && CONST_INT_P (XEXP (op1, 1)))
11602 	{
11603 	  rtx inner_op0 = XEXP (op0, 0);
11604 	  rtx inner_op1 = XEXP (op1, 0);
11605 	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11606 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11607 	  int changed = 0;
11608 
11609 	  if (paradoxical_subreg_p (inner_op0)
11610 	      && GET_CODE (inner_op1) == SUBREG
11611 	      && (GET_MODE (SUBREG_REG (inner_op0))
11612 		  == GET_MODE (SUBREG_REG (inner_op1)))
11613 	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11614 		  <= HOST_BITS_PER_WIDE_INT)
11615 	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11616 					     GET_MODE (SUBREG_REG (inner_op0)))))
11617 	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11618 					     GET_MODE (SUBREG_REG (inner_op1))))))
11619 	    {
11620 	      op0 = SUBREG_REG (inner_op0);
11621 	      op1 = SUBREG_REG (inner_op1);
11622 
11623 	      /* The resulting comparison is always unsigned since we masked
11624 		 off the original sign bit.  */
11625 	      code = unsigned_condition (code);
11626 
11627 	      changed = 1;
11628 	    }
11629 
11630 	  else if (c0 == c1)
11631 	    for (tmode = GET_CLASS_NARROWEST_MODE
11632 		 (GET_MODE_CLASS (GET_MODE (op0)));
11633 		 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11634 	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11635 		{
11636 		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
11637 		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
11638 		  code = unsigned_condition (code);
11639 		  changed = 1;
11640 		  break;
11641 		}
11642 
11643 	  if (! changed)
11644 	    break;
11645 	}
11646 
11647       /* If both operands are NOT, we can strip off the outer operation
11648 	 and adjust the comparison code for swapped operands; similarly for
11649 	 NEG, except that this must be an equality comparison.  */
11650       else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11651 	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11652 		   && (code == EQ || code == NE)))
11653 	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11654 
11655       else
11656 	break;
11657     }
11658 
11659   /* If the first operand is a constant, swap the operands and adjust the
11660      comparison code appropriately, but don't do this if the second operand
11661      is already a constant integer.  */
11662   if (swap_commutative_operands_p (op0, op1))
11663     {
11664       std::swap (op0, op1);
11665       code = swap_condition (code);
11666     }
11667 
11668   /* We now enter a loop during which we will try to simplify the comparison.
11669      For the most part, we only are concerned with comparisons with zero,
11670      but some things may really be comparisons with zero but not start
11671      out looking that way.  */
11672 
11673   while (CONST_INT_P (op1))
11674     {
11675       machine_mode mode = GET_MODE (op0);
11676       unsigned int mode_width = GET_MODE_PRECISION (mode);
11677       unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11678       int equality_comparison_p;
11679       int sign_bit_comparison_p;
11680       int unsigned_comparison_p;
11681       HOST_WIDE_INT const_op;
11682 
11683       /* We only want to handle integral modes.  This catches VOIDmode,
11684 	 CCmode, and the floating-point modes.  An exception is that we
11685 	 can handle VOIDmode if OP0 is a COMPARE or a comparison
11686 	 operation.  */
11687 
11688       if (GET_MODE_CLASS (mode) != MODE_INT
11689 	  && ! (mode == VOIDmode
11690 		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11691 	break;
11692 
11693       /* Try to simplify the compare to constant, possibly changing the
11694 	 comparison op, and/or changing op1 to zero.  */
11695       code = simplify_compare_const (code, mode, op0, &op1);
11696       const_op = INTVAL (op1);
11697 
11698       /* Compute some predicates to simplify code below.  */
11699 
11700       equality_comparison_p = (code == EQ || code == NE);
11701       sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11702       unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11703 			       || code == GEU);
11704 
11705       /* If this is a sign bit comparison and we can do arithmetic in
11706 	 MODE, say that we will only be needing the sign bit of OP0.  */
11707       if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11708 	op0 = force_to_mode (op0, mode,
11709 			     (unsigned HOST_WIDE_INT) 1
11710 			     << (GET_MODE_PRECISION (mode) - 1),
11711 			     0);
11712 
11713       /* Now try cases based on the opcode of OP0.  If none of the cases
11714 	 does a "continue", we exit this loop immediately after the
11715 	 switch.  */
11716 
11717       switch (GET_CODE (op0))
11718 	{
11719 	case ZERO_EXTRACT:
11720 	  /* If we are extracting a single bit from a variable position in
11721 	     a constant that has only a single bit set and are comparing it
11722 	     with zero, we can convert this into an equality comparison
11723 	     between the position and the location of the single bit.  */
11724 	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11725 	     have already reduced the shift count modulo the word size.  */
11726 	  if (!SHIFT_COUNT_TRUNCATED
11727 	      && CONST_INT_P (XEXP (op0, 0))
11728 	      && XEXP (op0, 1) == const1_rtx
11729 	      && equality_comparison_p && const_op == 0
11730 	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11731 	    {
11732 	      if (BITS_BIG_ENDIAN)
11733 		i = BITS_PER_WORD - 1 - i;
11734 
11735 	      op0 = XEXP (op0, 2);
11736 	      op1 = GEN_INT (i);
11737 	      const_op = i;
11738 
11739 	      /* Result is nonzero iff shift count is equal to I.  */
11740 	      code = reverse_condition (code);
11741 	      continue;
11742 	    }
11743 
11744 	  /* ... fall through ...  */
11745 
11746 	case SIGN_EXTRACT:
11747 	  tem = expand_compound_operation (op0);
11748 	  if (tem != op0)
11749 	    {
11750 	      op0 = tem;
11751 	      continue;
11752 	    }
11753 	  break;
11754 
11755 	case NOT:
11756 	  /* If testing for equality, we can take the NOT of the constant.  */
11757 	  if (equality_comparison_p
11758 	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11759 	    {
11760 	      op0 = XEXP (op0, 0);
11761 	      op1 = tem;
11762 	      continue;
11763 	    }
11764 
11765 	  /* If just looking at the sign bit, reverse the sense of the
11766 	     comparison.  */
11767 	  if (sign_bit_comparison_p)
11768 	    {
11769 	      op0 = XEXP (op0, 0);
11770 	      code = (code == GE ? LT : GE);
11771 	      continue;
11772 	    }
11773 	  break;
11774 
11775 	case NEG:
11776 	  /* If testing for equality, we can take the NEG of the constant.  */
11777 	  if (equality_comparison_p
11778 	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11779 	    {
11780 	      op0 = XEXP (op0, 0);
11781 	      op1 = tem;
11782 	      continue;
11783 	    }
11784 
11785 	  /* The remaining cases only apply to comparisons with zero.  */
11786 	  if (const_op != 0)
11787 	    break;
11788 
11789 	  /* When X is ABS or is known positive,
11790 	     (neg X) is < 0 if and only if X != 0.  */
11791 
11792 	  if (sign_bit_comparison_p
11793 	      && (GET_CODE (XEXP (op0, 0)) == ABS
11794 		  || (mode_width <= HOST_BITS_PER_WIDE_INT
11795 		      && (nonzero_bits (XEXP (op0, 0), mode)
11796 			  & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11797 			 == 0)))
11798 	    {
11799 	      op0 = XEXP (op0, 0);
11800 	      code = (code == LT ? NE : EQ);
11801 	      continue;
11802 	    }
11803 
11804 	  /* If we have NEG of something whose two high-order bits are the
11805 	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
11806 	  if (num_sign_bit_copies (op0, mode) >= 2)
11807 	    {
11808 	      op0 = XEXP (op0, 0);
11809 	      code = swap_condition (code);
11810 	      continue;
11811 	    }
11812 	  break;
11813 
11814 	case ROTATE:
11815 	  /* If we are testing equality and our count is a constant, we
11816 	     can perform the inverse operation on our RHS.  */
11817 	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11818 	      && (tem = simplify_binary_operation (ROTATERT, mode,
11819 						   op1, XEXP (op0, 1))) != 0)
11820 	    {
11821 	      op0 = XEXP (op0, 0);
11822 	      op1 = tem;
11823 	      continue;
11824 	    }
11825 
11826 	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11827 	     a particular bit.  Convert it to an AND of a constant of that
11828 	     bit.  This will be converted into a ZERO_EXTRACT.  */
11829 	  if (const_op == 0 && sign_bit_comparison_p
11830 	      && CONST_INT_P (XEXP (op0, 1))
11831 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
11832 	    {
11833 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11834 					    ((unsigned HOST_WIDE_INT) 1
11835 					     << (mode_width - 1
11836 						 - INTVAL (XEXP (op0, 1)))));
11837 	      code = (code == LT ? NE : EQ);
11838 	      continue;
11839 	    }
11840 
11841 	  /* Fall through.  */
11842 
11843 	case ABS:
11844 	  /* ABS is ignorable inside an equality comparison with zero.  */
11845 	  if (const_op == 0 && equality_comparison_p)
11846 	    {
11847 	      op0 = XEXP (op0, 0);
11848 	      continue;
11849 	    }
11850 	  break;
11851 
11852 	case SIGN_EXTEND:
11853 	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11854 	     (compare FOO CONST) if CONST fits in FOO's mode and we
11855 	     are either testing inequality or have an unsigned
11856 	     comparison with ZERO_EXTEND or a signed comparison with
11857 	     SIGN_EXTEND.  But don't do it if we don't have a compare
11858 	     insn of the given mode, since we'd have to revert it
11859 	     later on, and then we wouldn't know whether to sign- or
11860 	     zero-extend.  */
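	  /* E.g. (illustrative) comparing (sign_extend:SI FOO) against
	     (const_int -5), with FOO in QImode, can be reduced to comparing
	     FOO against (const_int -5) in QImode, provided the target has a
	     QImode compare insn; -5 survives truncation to QImode unchanged.  */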
11861 	  mode = GET_MODE (XEXP (op0, 0));
11862 	  if (GET_MODE_CLASS (mode) == MODE_INT
11863 	      && ! unsigned_comparison_p
11864 	      && HWI_COMPUTABLE_MODE_P (mode)
11865 	      && trunc_int_for_mode (const_op, mode) == const_op
11866 	      && have_insn_for (COMPARE, mode))
11867 	    {
11868 	      op0 = XEXP (op0, 0);
11869 	      continue;
11870 	    }
11871 	  break;
11872 
11873 	case SUBREG:
11874 	  /* Check for the case where we are comparing A - C1 with C2, that is
11875 
11876 	       (subreg:MODE (plus (A) (-C1))) op (C2)
11877 
11878 	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
11879 	     comparison in the wider mode.  One of the following two conditions
11880 	     must be true in order for this to be valid:
11881 
11882 	       1. The mode extension results in the same bit pattern being added
11883 		  on both sides and the comparison is equality or unsigned.  As
11884 		  C2 has been truncated to fit in MODE, the pattern can only be
11885 		  all 0s or all 1s.
11886 
11887 	       2. The mode extension results in the sign bit being copied on
11888 		  each side.
11889 
11890 	     The difficulty here is that we have predicates for A but not for
11891 	     (A - C1) so we need to check that C1 is within proper bounds so
11892 	     as to perturb A as little as possible.  */
11893 
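	  /* Illustrative case (hypothetical, MODE == QImode): if A is known
	     to fit in QImode, (eq (subreg:QI (plus:SI A (const_int -1)) 0)
	     (const_int 5)) satisfies condition 1 above with C1 = 1, so the
	     comparison can be lifted to SImode as
	     (eq (plus:SI A (const_int -1)) (const_int 5)).  */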
11894 	  if (mode_width <= HOST_BITS_PER_WIDE_INT
11895 	      && subreg_lowpart_p (op0)
11896 	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
11897 	      && GET_CODE (SUBREG_REG (op0)) == PLUS
11898 	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11899 	    {
11900 	      machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11901 	      rtx a = XEXP (SUBREG_REG (op0), 0);
11902 	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11903 
11904 	      if ((c1 > 0
11905 		   && (unsigned HOST_WIDE_INT) c1
11906 		       < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11907 		   && (equality_comparison_p || unsigned_comparison_p)
11908 		   /* (A - C1) zero-extends if it is positive and sign-extends
11909 		      if it is negative, C2 both zero- and sign-extends.  */
11910 		   && ((0 == (nonzero_bits (a, inner_mode)
11911 			      & ~GET_MODE_MASK (mode))
11912 			&& const_op >= 0)
11913 		       /* (A - C1) sign-extends if it is positive and 1-extends
11914 			  if it is negative, C2 both sign- and 1-extends.  */
11915 		       || (num_sign_bit_copies (a, inner_mode)
11916 			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11917 					     - mode_width)
11918 			   && const_op < 0)))
11919 		  || ((unsigned HOST_WIDE_INT) c1
11920 		       < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11921 		      /* (A - C1) always sign-extends, like C2.  */
11922 		      && num_sign_bit_copies (a, inner_mode)
11923 			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11924 					   - (mode_width - 1))))
11925 		{
11926 		  op0 = SUBREG_REG (op0);
11927 		  continue;
11928 		}
11929 	    }
11930 
11931 	  /* If the inner mode is narrower and we are extracting the low part,
11932 	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
11933 	  if (subreg_lowpart_p (op0)
11934 	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
11935 	    /* Fall through */ ;
11936 	  else
11937 	    break;
11938 
11939 	  /* ... fall through ...  */
11940 
11941 	case ZERO_EXTEND:
11942 	  mode = GET_MODE (XEXP (op0, 0));
11943 	  if (GET_MODE_CLASS (mode) == MODE_INT
11944 	      && (unsigned_comparison_p || equality_comparison_p)
11945 	      && HWI_COMPUTABLE_MODE_P (mode)
11946 	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
11947 	      && const_op >= 0
11948 	      && have_insn_for (COMPARE, mode))
11949 	    {
11950 	      op0 = XEXP (op0, 0);
11951 	      continue;
11952 	    }
11953 	  break;
11954 
11955 	case PLUS:
11956 	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
11957 	     this for equality comparisons due to pathological cases involving
11958 	     overflows.  */
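	  /* For example (illustrative), (eq (plus:SI X (const_int 4))
	     (const_int 10)) becomes (eq X (const_int 6)).  */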
11959 	  if (equality_comparison_p
11960 	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
11961 							op1, XEXP (op0, 1))))
11962 	    {
11963 	      op0 = XEXP (op0, 0);
11964 	      op1 = tem;
11965 	      continue;
11966 	    }
11967 
11968 	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
11969 	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11970 	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11971 	    {
11972 	      op0 = XEXP (XEXP (op0, 0), 0);
11973 	      code = (code == LT ? EQ : NE);
11974 	      continue;
11975 	    }
11976 	  break;
11977 
11978 	case MINUS:
11979 	  /* We used to optimize signed comparisons against zero, but that
11980 	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
11981 	     arrive here as equality comparisons, or (GEU, LTU) are
11982 	     optimized away.  No need to special-case them.  */
11983 
11984 	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
11985 	     (eq B (minus A C)), whichever simplifies.  We can only do
11986 	     this for equality comparisons due to pathological cases involving
11987 	     overflows.  */
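	  /* For example (illustrative), (eq (minus:SI A (const_int 3))
	     (const_int 7)) becomes (eq A (const_int 10)).  */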
11988 	  if (equality_comparison_p
11989 	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
11990 							XEXP (op0, 1), op1)))
11991 	    {
11992 	      op0 = XEXP (op0, 0);
11993 	      op1 = tem;
11994 	      continue;
11995 	    }
11996 
11997 	  if (equality_comparison_p
11998 	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
11999 							XEXP (op0, 0), op1)))
12000 	    {
12001 	      op0 = XEXP (op0, 1);
12002 	      op1 = tem;
12003 	      continue;
12004 	    }
12005 
12006 	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12007 	     of bits in X minus 1, is one iff X > 0.  */
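	  /* Informally: (ashiftrt X C) is 0 when X >= 0 and -1 when X < 0,
	     so the MINUS computes -X or -1 - X respectively, and its sign
	     bit is set exactly when X > 0.  */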
12008 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12009 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12010 	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12011 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12012 	    {
12013 	      op0 = XEXP (op0, 1);
12014 	      code = (code == GE ? LE : GT);
12015 	      continue;
12016 	    }
12017 	  break;
12018 
12019 	case XOR:
12020 	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
12021 	     if C is zero or B is a constant.  */
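	  /* For example (illustrative), (eq (xor:SI A (const_int 12))
	     (const_int 5)) becomes (eq A (const_int 9)), since 12 ^ 5 == 9.  */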
12022 	  if (equality_comparison_p
12023 	      && 0 != (tem = simplify_binary_operation (XOR, mode,
12024 							XEXP (op0, 1), op1)))
12025 	    {
12026 	      op0 = XEXP (op0, 0);
12027 	      op1 = tem;
12028 	      continue;
12029 	    }
12030 	  break;
12031 
12032 	case EQ:  case NE:
12033 	case UNEQ:  case LTGT:
12034 	case LT:  case LTU:  case UNLT:  case LE:  case LEU:  case UNLE:
12035 	case GT:  case GTU:  case UNGT:  case GE:  case GEU:  case UNGE:
12036 	case UNORDERED: case ORDERED:
12037 	  /* We can't do anything if OP0 is a condition code value, rather
12038 	     than an actual data value.  */
12039 	  if (const_op != 0
12040 	      || CC0_P (XEXP (op0, 0))
12041 	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12042 	    break;
12043 
12044 	  /* Get the two operands being compared.  */
12045 	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12046 	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12047 	  else
12048 	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12049 
12050 	  /* Check for the cases where we simply want the result of the
12051 	     earlier test or the opposite of that result.  */
12052 	  if (code == NE || code == EQ
12053 	      || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
12054 		  && (code == LT || code == GE)))
12055 	    {
12056 	      enum rtx_code new_code;
12057 	      if (code == LT || code == NE)
12058 		new_code = GET_CODE (op0);
12059 	      else
12060 		new_code = reversed_comparison_code (op0, NULL);
12061 
12062 	      if (new_code != UNKNOWN)
12063 		{
12064 		  code = new_code;
12065 		  op0 = tem;
12066 		  op1 = tem1;
12067 		  continue;
12068 		}
12069 	    }
12070 	  break;
12071 
12072 	case IOR:
12073 	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12074 	     iff X <= 0.  */
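	  /* Informally: if X > 0 then both X and X - 1 are non-negative and
	     the IOR has a clear sign bit; if X < 0 then X itself sets it; and
	     if X == 0 then X - 1 == -1 sets it.  */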
12075 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12076 	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12077 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12078 	    {
12079 	      op0 = XEXP (op0, 1);
12080 	      code = (code == GE ? GT : LE);
12081 	      continue;
12082 	    }
12083 	  break;
12084 
12085 	case AND:
12086 	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
12087 	     will be converted to a ZERO_EXTRACT later.  */
12088 	  if (const_op == 0 && equality_comparison_p
12089 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
12090 	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12091 	    {
12092 	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12093 				      XEXP (XEXP (op0, 0), 1));
12094 	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12095 	      continue;
12096 	    }
12097 
12098 	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12099 	     zero and X is a comparison and C1 and C2 describe only bits set
12100 	     in STORE_FLAG_VALUE, we can compare with X.  */
12101 	  if (const_op == 0 && equality_comparison_p
12102 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12103 	      && CONST_INT_P (XEXP (op0, 1))
12104 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12105 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12106 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12107 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12108 	    {
12109 	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12110 		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
12111 	      if ((~STORE_FLAG_VALUE & mask) == 0
12112 		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12113 		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12114 			  && COMPARISON_P (tem))))
12115 		{
12116 		  op0 = XEXP (XEXP (op0, 0), 0);
12117 		  continue;
12118 		}
12119 	    }
12120 
12121 	  /* If we are doing an equality comparison of an AND of a bit equal
12122 	     to the sign bit, replace this with a LT or GE comparison of
12123 	     the underlying value.  */
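	  /* E.g. in SImode (illustrative),
	     (eq (and:SI X (const_int 0x80000000)) (const_int 0)) becomes
	     (ge X (const_int 0)), and the NE form becomes LT.  */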
12124 	  if (equality_comparison_p
12125 	      && const_op == 0
12126 	      && CONST_INT_P (XEXP (op0, 1))
12127 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12128 	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12129 		  == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
12130 	    {
12131 	      op0 = XEXP (op0, 0);
12132 	      code = (code == EQ ? GE : LT);
12133 	      continue;
12134 	    }
12135 
12136 	  /* If this AND operation is really a ZERO_EXTEND from a narrower
12137 	     mode, the constant fits within that mode, and this is either an
12138 	     equality or unsigned comparison, try to do this comparison in
12139 	     the narrower mode.
12140 
12141 	     Note that in:
12142 
12143 	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12144 	     -> (ne:DI (reg:SI 4) (const_int 0))
12145 
12146 	     unless TRULY_NOOP_TRUNCATION allows it or the register is
12147 	     known to hold a value of the required mode, the
12148 	     transformation is invalid.  */
12149 	  if ((equality_comparison_p || unsigned_comparison_p)
12150 	      && CONST_INT_P (XEXP (op0, 1))
12151 	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12152 				   & GET_MODE_MASK (mode))
12153 				  + 1)) >= 0
12154 	      && const_op >> i == 0
12155 	      && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode)
12156 	    {
12157 	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12158 	      continue;
12159 	    }
12160 
12161 	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12162 	     fits in both M1 and M2 and the SUBREG is either paradoxical
12163 	     or represents the low part, permute the SUBREG and the AND
12164 	     and try again.  */
12165 	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
12166 	      && CONST_INT_P (XEXP (op0, 1)))
12167 	    {
12168 	      tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
12169 	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12170 	      /* Require an integral mode, to avoid creating something like
12171 		 (AND:SF ...).  */
12172 	      if (SCALAR_INT_MODE_P (tmode)
12173 		  /* It is unsafe to commute the AND into the SUBREG if the
12174 		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12175 		     not defined.  As originally written the upper bits
12176 		     have a defined value due to the AND operation.
12177 		     However, if we commute the AND inside the SUBREG then
12178 		     they no longer have defined values and the meaning of
12179 		     the code has been changed.
12180 		     Also C1 should not change value in the smaller mode,
12181 		     see PR67028 (a positive C1 can become negative in the
12182 		     smaller mode, so that the AND does no longer mask the
12183 		     upper bits).  */
12184 		  && ((WORD_REGISTER_OPERATIONS
12185 		       && mode_width > GET_MODE_PRECISION (tmode)
12186 		       && mode_width <= BITS_PER_WORD
12187 		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12188 		      || (mode_width <= GET_MODE_PRECISION (tmode)
12189 			  && subreg_lowpart_p (XEXP (op0, 0))))
12190 		  && mode_width <= HOST_BITS_PER_WIDE_INT
12191 		  && HWI_COMPUTABLE_MODE_P (tmode)
12192 		  && (c1 & ~mask) == 0
12193 		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
12194 		  && c1 != mask
12195 		  && c1 != GET_MODE_MASK (tmode))
12196 		{
12197 		  op0 = simplify_gen_binary (AND, tmode,
12198 					     SUBREG_REG (XEXP (op0, 0)),
12199 					     gen_int_mode (c1, tmode));
12200 		  op0 = gen_lowpart (mode, op0);
12201 		  continue;
12202 		}
12203 	    }
12204 
12205 	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
12206 	  if (const_op == 0 && equality_comparison_p
12207 	      && XEXP (op0, 1) == const1_rtx
12208 	      && GET_CODE (XEXP (op0, 0)) == NOT)
12209 	    {
12210 	      op0 = simplify_and_const_int (NULL_RTX, mode,
12211 					    XEXP (XEXP (op0, 0), 0), 1);
12212 	      code = (code == NE ? EQ : NE);
12213 	      continue;
12214 	    }
12215 
12216 	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12217 	     (eq (and (lshiftrt X) 1) 0).
12218 	     Also handle the case where (not X) is expressed using xor.  */
12219 	  if (const_op == 0 && equality_comparison_p
12220 	      && XEXP (op0, 1) == const1_rtx
12221 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12222 	    {
12223 	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
12224 	      rtx shift_count = XEXP (XEXP (op0, 0), 1);
12225 
12226 	      if (GET_CODE (shift_op) == NOT
12227 		  || (GET_CODE (shift_op) == XOR
12228 		      && CONST_INT_P (XEXP (shift_op, 1))
12229 		      && CONST_INT_P (shift_count)
12230 		      && HWI_COMPUTABLE_MODE_P (mode)
12231 		      && (UINTVAL (XEXP (shift_op, 1))
12232 			  == (unsigned HOST_WIDE_INT) 1
12233 			       << INTVAL (shift_count))))
12234 		{
12235 		  op0
12236 		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12237 		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12238 		  code = (code == NE ? EQ : NE);
12239 		  continue;
12240 		}
12241 	    }
12242 	  break;
12243 
12244 	case ASHIFT:
12245 	  /* If we have (compare (ashift FOO N) (const_int C)) and
12246 	     the high order N bits of FOO (N+1 if an inequality comparison)
12247 	     are known to be zero, we can do this by comparing FOO with C
12248 	     shifted right N bits so long as the low-order N bits of C are
12249 	     zero.  */
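	  /* Illustrative example: if the two high-order bits of FOO are known
	     to be zero, (eq (ashift:SI FOO (const_int 2)) (const_int 20))
	     becomes (eq FOO (const_int 5)).  */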
12250 	  if (CONST_INT_P (XEXP (op0, 1))
12251 	      && INTVAL (XEXP (op0, 1)) >= 0
12252 	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12253 		  < HOST_BITS_PER_WIDE_INT)
12254 	      && (((unsigned HOST_WIDE_INT) const_op
12255 		   & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
12256 		      - 1)) == 0)
12257 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12258 	      && (nonzero_bits (XEXP (op0, 0), mode)
12259 		  & ~(mask >> (INTVAL (XEXP (op0, 1))
12260 			       + ! equality_comparison_p))) == 0)
12261 	    {
12262 	      /* We must perform a logical shift, not an arithmetic one,
12263 		 as we want the top N bits of C to be zero.  */
12264 	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12265 
12266 	      temp >>= INTVAL (XEXP (op0, 1));
12267 	      op1 = gen_int_mode (temp, mode);
12268 	      op0 = XEXP (op0, 0);
12269 	      continue;
12270 	    }
12271 
12272 	  /* If we are doing a sign bit comparison, it means we are testing
12273 	     a particular bit.  Convert it to the appropriate AND.  */
12274 	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12275 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
12276 	    {
12277 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12278 					    ((unsigned HOST_WIDE_INT) 1
12279 					     << (mode_width - 1
12280 						 - INTVAL (XEXP (op0, 1)))));
12281 	      code = (code == LT ? NE : EQ);
12282 	      continue;
12283 	    }
12284 
12285 	  /* If this is an equality comparison with zero and we are shifting
12286 	     the low bit to the sign bit, we can convert this to an AND of the
12287 	     low-order bit.  */
12288 	  if (const_op == 0 && equality_comparison_p
12289 	      && CONST_INT_P (XEXP (op0, 1))
12290 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12291 	    {
12292 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12293 	      continue;
12294 	    }
12295 	  break;
12296 
12297 	case ASHIFTRT:
12298 	  /* If this is an equality comparison with zero, we can do this
12299 	     as a logical shift, which might be much simpler.  */
12300 	  if (equality_comparison_p && const_op == 0
12301 	      && CONST_INT_P (XEXP (op0, 1)))
12302 	    {
12303 	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12304 					  XEXP (op0, 0),
12305 					  INTVAL (XEXP (op0, 1)));
12306 	      continue;
12307 	    }
12308 
12309 	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12310 	     do the comparison in a narrower mode.  */
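	  /* The sign extension appears here as (ashiftrt (ashift X C) C).
	     For instance (illustrative), in SImode with C == 24 the pattern
	     sign-extends the low byte, so
	     (lt (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
	     (const_int 5)) can be done as a QImode comparison of the low
	     part of X against 5.  */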
12311 	  if (! unsigned_comparison_p
12312 	      && CONST_INT_P (XEXP (op0, 1))
12313 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
12314 	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12315 	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12316 					 MODE_INT, 1)) != BLKmode
12317 	      && (((unsigned HOST_WIDE_INT) const_op
12318 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
12319 		  <= GET_MODE_MASK (tmode)))
12320 	    {
12321 	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12322 	      continue;
12323 	    }
12324 
12325 	  /* Likewise if OP0 is a PLUS of a sign extension with a
12326 	     constant, which is usually represented with the PLUS
12327 	     between the shifts.  */
12328 	  if (! unsigned_comparison_p
12329 	      && CONST_INT_P (XEXP (op0, 1))
12330 	      && GET_CODE (XEXP (op0, 0)) == PLUS
12331 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12332 	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12333 	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12334 	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12335 					 MODE_INT, 1)) != BLKmode
12336 	      && (((unsigned HOST_WIDE_INT) const_op
12337 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
12338 		  <= GET_MODE_MASK (tmode)))
12339 	    {
12340 	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12341 	      rtx add_const = XEXP (XEXP (op0, 0), 1);
12342 	      rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
12343 						   add_const, XEXP (op0, 1));
12344 
12345 	      op0 = simplify_gen_binary (PLUS, tmode,
12346 					 gen_lowpart (tmode, inner),
12347 					 new_const);
12348 	      continue;
12349 	    }
12350 
12351 	  /* ... fall through ...  */
12352 	case LSHIFTRT:
12353 	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12354 	     the low order N bits of FOO are known to be zero, we can do this
12355 	     by comparing FOO with C shifted left N bits so long as no
12356 	     overflow occurs.  Even if the low order N bits of FOO aren't known
12357 	     to be zero, if the comparison is >= or < we can use the same
12358 	     optimization and for > or <= by setting all the low
12359 	     order N bits in the comparison constant.  */
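	  /* Illustrative example: if the low two bits of FOO are known to be
	     zero, (eq (lshiftrt:SI FOO (const_int 2)) (const_int 5)) becomes
	     (eq FOO (const_int 20)); for a logical shift the condition is
	     first made unsigned.  */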
12360 	  if (CONST_INT_P (XEXP (op0, 1))
12361 	      && INTVAL (XEXP (op0, 1)) > 0
12362 	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12363 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12364 	      && (((unsigned HOST_WIDE_INT) const_op
12365 		   + (GET_CODE (op0) != LSHIFTRT
12366 		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12367 			 + 1)
12368 		      : 0))
12369 		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12370 	    {
12371 	      unsigned HOST_WIDE_INT low_bits
12372 		= (nonzero_bits (XEXP (op0, 0), mode)
12373 		   & (((unsigned HOST_WIDE_INT) 1
12374 		       << INTVAL (XEXP (op0, 1))) - 1));
12375 	      if (low_bits == 0 || !equality_comparison_p)
12376 		{
12377 		  /* If the shift was logical, then we must make the condition
12378 		     unsigned.  */
12379 		  if (GET_CODE (op0) == LSHIFTRT)
12380 		    code = unsigned_condition (code);
12381 
12382 		  const_op <<= INTVAL (XEXP (op0, 1));
12383 		  if (low_bits != 0
12384 		      && (code == GT || code == GTU
12385 			  || code == LE || code == LEU))
12386 		    const_op
12387 		      |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
12388 		  op1 = GEN_INT (const_op);
12389 		  op0 = XEXP (op0, 0);
12390 		  continue;
12391 		}
12392 	    }
12393 
12394 	  /* If we are using this shift to extract just the sign bit, we
12395 	     can replace this with an LT or GE comparison.  */
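	  /* E.g. (illustrative, SImode) (ne (lshiftrt:SI X (const_int 31))
	     (const_int 0)) becomes (lt X (const_int 0)).  */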
12396 	  if (const_op == 0
12397 	      && (equality_comparison_p || sign_bit_comparison_p)
12398 	      && CONST_INT_P (XEXP (op0, 1))
12399 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12400 	    {
12401 	      op0 = XEXP (op0, 0);
12402 	      code = (code == NE || code == GT ? LT : GE);
12403 	      continue;
12404 	    }
12405 	  break;
12406 
12407 	default:
12408 	  break;
12409 	}
12410 
12411       break;
12412     }
12413 
12414   /* Now make any compound operations involved in this comparison.  Then,
12415      check for an outermost SUBREG on OP0 that is not doing anything or is
12416      paradoxical.  The latter transformation must only be performed when
12417      it is known that the "extra" bits will be the same in op0 and op1 or
12418      that they don't matter.  There are three cases to consider:
12419 
12420      1. SUBREG_REG (op0) is a register.  In this case the bits are don't
12421      care bits and we can assume they have any convenient value.  So
12422      making the transformation is safe.
12423 
12424      2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
12425      In this case the upper bits of op0 are undefined.  We should not make
12426      the simplification in that case as we do not know the contents of
12427      those bits.
12428 
12429      3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
12430      UNKNOWN.  In that case we know those bits are zeros or ones.  We must
12431      also be sure that they are the same as the upper bits of op1.
12432 
12433      We can never remove a SUBREG for a non-equality comparison because
12434      the sign bit is in a different place in the underlying object.  */
12435 
12436   rtx_code op0_mco_code = SET;
12437   if (op1 == const0_rtx)
12438     op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12439 
12440   op0 = make_compound_operation (op0, op0_mco_code);
12441   op1 = make_compound_operation (op1, SET);
12442 
12443   if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12444       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12445       && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12446       && (code == NE || code == EQ))
12447     {
12448       if (paradoxical_subreg_p (op0))
12449 	{
12450 	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
12451 	     implemented.  */
12452 	  if (REG_P (SUBREG_REG (op0)))
12453 	    {
12454 	      op0 = SUBREG_REG (op0);
12455 	      op1 = gen_lowpart (GET_MODE (op0), op1);
12456 	    }
12457 	}
12458       else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12459 		<= HOST_BITS_PER_WIDE_INT)
12460 	       && (nonzero_bits (SUBREG_REG (op0),
12461 				 GET_MODE (SUBREG_REG (op0)))
12462 		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12463 	{
12464 	  tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12465 
12466 	  if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12467 	       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12468 	    op0 = SUBREG_REG (op0), op1 = tem;
12469 	}
12470     }
12471 
12472   /* We now do the opposite procedure: Some machines don't have compare
12473      insns in all modes.  If OP0's mode is an integer mode smaller than a
12474      word and we can't do a compare in that mode, see if there is a larger
12475      mode for which we can do the compare.  There are a number of cases in
12476      which we can use the wider mode.  */
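  /* For example (hypothetical target): an EQ comparison of QImode values on
     a machine with no QImode compare but an SImode one can, when neither
     operand has bits set outside QImode, simply zero-extend both operands
     and compare in SImode.  */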
12477 
12478   mode = GET_MODE (op0);
12479   if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
12480       && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12481       && ! have_insn_for (COMPARE, mode))
12482     for (tmode = GET_MODE_WIDER_MODE (mode);
12483 	 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
12484 	 tmode = GET_MODE_WIDER_MODE (tmode))
12485       if (have_insn_for (COMPARE, tmode))
12486 	{
12487 	  int zero_extended;
12488 
12489 	  /* If this is a test for negative, we can make an explicit
12490 	     test of the sign bit.  Test this first so we can use
12491 	     a paradoxical subreg to extend OP0.  */
12492 
12493 	  if (op1 == const0_rtx && (code == LT || code == GE)
12494 	      && HWI_COMPUTABLE_MODE_P (mode))
12495 	    {
12496 	      unsigned HOST_WIDE_INT sign
12497 		= (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1);
12498 	      op0 = simplify_gen_binary (AND, tmode,
12499 					 gen_lowpart (tmode, op0),
12500 					 gen_int_mode (sign, tmode));
12501 	      code = (code == LT) ? NE : EQ;
12502 	      break;
12503 	    }
12504 
12505 	  /* If the only nonzero bits in OP0 and OP1 are those in the
12506 	     narrower mode and this is an equality or unsigned comparison,
12507 	     we can use the wider mode.  Similarly for sign-extended
12508 	     values, in which case it is true for all comparisons.  */
12509 	  zero_extended = ((code == EQ || code == NE
12510 			    || code == GEU || code == GTU
12511 			    || code == LEU || code == LTU)
12512 			   && (nonzero_bits (op0, tmode)
12513 			       & ~GET_MODE_MASK (mode)) == 0
12514 			   && ((CONST_INT_P (op1)
12515 				|| (nonzero_bits (op1, tmode)
12516 				    & ~GET_MODE_MASK (mode)) == 0)));
12517 
12518 	  if (zero_extended
12519 	      || ((num_sign_bit_copies (op0, tmode)
12520 		   > (unsigned int) (GET_MODE_PRECISION (tmode)
12521 				     - GET_MODE_PRECISION (mode)))
12522 		  && (num_sign_bit_copies (op1, tmode)
12523 		      > (unsigned int) (GET_MODE_PRECISION (tmode)
12524 					- GET_MODE_PRECISION (mode)))))
12525 	    {
12526 	      /* If OP0 is an AND and we don't have an AND in MODE either,
12527 		 make a new AND in the proper mode.  */
12528 	      if (GET_CODE (op0) == AND
12529 		  && !have_insn_for (AND, mode))
12530 		op0 = simplify_gen_binary (AND, tmode,
12531 					   gen_lowpart (tmode,
12532 							XEXP (op0, 0)),
12533 					   gen_lowpart (tmode,
12534 							XEXP (op0, 1)));
12535 	      else
12536 		{
12537 		  if (zero_extended)
12538 		    {
12539 		      op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
12540 		      op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
12541 		    }
12542 		  else
12543 		    {
12544 		      op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
12545 		      op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
12546 		    }
12547 		  break;
12548 		}
12549 	    }
12550 	}
12551 
12552   /* We may have changed the comparison operands.  Re-canonicalize.  */
12553   if (swap_commutative_operands_p (op0, op1))
12554     {
12555       std::swap (op0, op1);
12556       code = swap_condition (code);
12557     }
12558 
12559   /* If this machine only supports a subset of valid comparisons, see if we
12560      can convert an unsupported one into a supported one.  */
12561   target_canonicalize_comparison (&code, &op0, &op1, 0);
12562 
12563   *pop0 = op0;
12564   *pop1 = op1;
12565 
12566   return code;
12567 }
12568 
12569 /* Utility function for record_value_for_reg.  Count number of
12570    rtxs in X.  */
12571 static int
12572 count_rtxs (rtx x)
12573 {
12574   enum rtx_code code = GET_CODE (x);
12575   const char *fmt;
12576   int i, j, ret = 1;
12577 
12578   if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12579       || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12580     {
12581       rtx x0 = XEXP (x, 0);
12582       rtx x1 = XEXP (x, 1);
12583 
12584       if (x0 == x1)
12585 	return 1 + 2 * count_rtxs (x0);
12586 
12587       if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12588 	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12589 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12590 	return 2 + 2 * count_rtxs (x0)
12591 	       + count_rtxs (x0 == XEXP (x1, 0)
12592 			     ? XEXP (x1, 1) : XEXP (x1, 0));
12593 
12594       if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12595 	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12596 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12597 	return 2 + 2 * count_rtxs (x1)
12598 	       + count_rtxs (x1 == XEXP (x0, 0)
12599 			     ? XEXP (x0, 1) : XEXP (x0, 0));
12600     }
12601 
12602   fmt = GET_RTX_FORMAT (code);
12603   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12604     if (fmt[i] == 'e')
12605       ret += count_rtxs (XEXP (x, i));
12606     else if (fmt[i] == 'E')
12607       for (j = 0; j < XVECLEN (x, i); j++)
12608 	ret += count_rtxs (XVECEXP (x, i, j));
12609 
12610   return ret;
12611 }
12612 
12613 /* Utility function for following routine.  Called when X is part of a value
12614    being stored into last_set_value.  Sets last_set_table_tick
12615    for each register mentioned.  Similar to mention_regs in cse.c  */
12616 
12617 static void
12618 update_table_tick (rtx x)
12619 {
12620   enum rtx_code code = GET_CODE (x);
12621   const char *fmt = GET_RTX_FORMAT (code);
12622   int i, j;
12623 
12624   if (code == REG)
12625     {
12626       unsigned int regno = REGNO (x);
12627       unsigned int endregno = END_REGNO (x);
12628       unsigned int r;
12629 
12630       for (r = regno; r < endregno; r++)
12631 	{
12632 	  reg_stat_type *rsp = &reg_stat[r];
12633 	  rsp->last_set_table_tick = label_tick;
12634 	}
12635 
12636       return;
12637     }
12638 
12639   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12640     if (fmt[i] == 'e')
12641       {
12642 	/* Check for identical subexpressions.  If x contains
12643 	   identical subexpressions we only have to traverse one of
12644 	   them.  */
12645 	if (i == 0 && ARITHMETIC_P (x))
12646 	  {
12647 	    /* Note that at this point x1 has already been
12648 	       processed.  */
12649 	    rtx x0 = XEXP (x, 0);
12650 	    rtx x1 = XEXP (x, 1);
12651 
12652 	    /* If x0 and x1 are identical then there is no need to
12653 	       process x0.  */
12654 	    if (x0 == x1)
12655 	      break;
12656 
12657 	    /* If x0 is identical to a subexpression of x1 then while
12658 	       processing x1, x0 has already been processed.  Thus we
12659 	       are done with x.  */
12660 	    if (ARITHMETIC_P (x1)
12661 		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12662 	      break;
12663 
12664 	    /* If x1 is identical to a subexpression of x0 then we
12665 	       still have to process the rest of x0.  */
12666 	    if (ARITHMETIC_P (x0)
12667 		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12668 	      {
12669 		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12670 		break;
12671 	      }
12672 	  }
12673 
12674 	update_table_tick (XEXP (x, i));
12675       }
12676     else if (fmt[i] == 'E')
12677       for (j = 0; j < XVECLEN (x, i); j++)
12678 	update_table_tick (XVECEXP (x, i, j));
12679 }
12680 
12681 /* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
12682    are saying that the register is clobbered and we no longer know its
12683    value.  If INSN is zero, don't update reg_stat[].last_set; this is
12684    only permitted with VALUE also zero and is used to invalidate the
12685    register.  */
12686 
12687 static void
12688 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
12689 {
12690   unsigned int regno = REGNO (reg);
12691   unsigned int endregno = END_REGNO (reg);
12692   unsigned int i;
12693   reg_stat_type *rsp;
12694 
12695   /* If VALUE contains REG and we have a previous value for REG, substitute
12696      the previous value.  */
12697   if (value && insn && reg_overlap_mentioned_p (reg, value))
12698     {
12699       rtx tem;
12700 
12701       /* Set things up so get_last_value is allowed to see anything set up to
12702 	 our insn.  */
12703       subst_low_luid = DF_INSN_LUID (insn);
12704       tem = get_last_value (reg);
12705 
12706       /* If TEM is simply a binary operation with two CLOBBERs as operands,
12707 	 it isn't going to be useful and will take a lot of time to process,
12708 	 so just use the CLOBBER.  */
12709 
12710       if (tem)
12711 	{
12712 	  if (ARITHMETIC_P (tem)
12713 	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
12714 	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12715 	    tem = XEXP (tem, 0);
12716 	  else if (count_occurrences (value, reg, 1) >= 2)
12717 	    {
12718 	      /* If there are two or more occurrences of REG in VALUE,
12719 		 prevent the value from growing too much.  */
12720 	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12721 		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12722 	    }
12723 
12724 	  value = replace_rtx (copy_rtx (value), reg, tem);
12725 	}
12726     }
12727 
12728   /* For each register modified, show we don't know its value, that
12729      we don't know about its bitwise content, that its value has been
12730      updated, and that we don't know the location of the death of the
12731      register.  */
12732   for (i = regno; i < endregno; i++)
12733     {
12734       rsp = &reg_stat[i];
12735 
12736       if (insn)
12737 	rsp->last_set = insn;
12738 
12739       rsp->last_set_value = 0;
12740       rsp->last_set_mode = VOIDmode;
12741       rsp->last_set_nonzero_bits = 0;
12742       rsp->last_set_sign_bit_copies = 0;
12743       rsp->last_death = 0;
12744       rsp->truncated_to_mode = VOIDmode;
12745     }
12746 
12747   /* Mark registers that are being referenced in this value.  */
12748   if (value)
12749     update_table_tick (value);
12750 
12751   /* Now update the status of each register being set.
12752      If someone is using this register in this block, set this register
12753      to invalid since we will get confused between the two lives in this
12754      basic block.  This makes using this register always invalid.  In cse, we
12755      scan the table to invalidate all entries using this register, but this
12756      is too much work for us.  */
12757 
12758   for (i = regno; i < endregno; i++)
12759     {
12760       rsp = &reg_stat[i];
12761       rsp->last_set_label = label_tick;
12762       if (!insn
12763 	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12764 	rsp->last_set_invalid = 1;
12765       else
12766 	rsp->last_set_invalid = 0;
12767     }
12768 
12769   /* The value being assigned might refer to X (like in "x++;").  In that
12770      case, we must replace it with (clobber (const_int 0)) to prevent
12771      infinite loops.  */
12772   rsp = &reg_stat[regno];
12773   if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12774     {
12775       value = copy_rtx (value);
12776       if (!get_last_value_validate (&value, insn, label_tick, 1))
12777 	value = 0;
12778     }
12779 
12780   /* For the main register being modified, update the value, the mode, the
12781      nonzero bits, and the number of sign bit copies.  */
12782 
12783   rsp->last_set_value = value;
12784 
12785   if (value)
12786     {
12787       machine_mode mode = GET_MODE (reg);
12788       subst_low_luid = DF_INSN_LUID (insn);
12789       rsp->last_set_mode = mode;
12790       if (GET_MODE_CLASS (mode) == MODE_INT
12791 	  && HWI_COMPUTABLE_MODE_P (mode))
12792 	mode = nonzero_bits_mode;
12793       rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12794       rsp->last_set_sign_bit_copies
12795 	= num_sign_bit_copies (value, GET_MODE (reg));
12796     }
12797 }
12798 
12799 /* Called via note_stores from record_dead_and_set_regs to handle one
12800    SET or CLOBBER in an insn.  DATA is the instruction in which the
12801    set is occurring.  */
12802 
12803 static void
12804 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12805 {
12806   rtx_insn *record_dead_insn = (rtx_insn *) data;
12807 
12808   if (GET_CODE (dest) == SUBREG)
12809     dest = SUBREG_REG (dest);
12810 
12811   if (!record_dead_insn)
12812     {
12813       if (REG_P (dest))
12814 	record_value_for_reg (dest, NULL, NULL_RTX);
12815       return;
12816     }
12817 
12818   if (REG_P (dest))
12819     {
12820       /* If we are setting the whole register, we know its value.  Otherwise
12821 	 show that we don't know the value.  We can handle SUBREG in
12822 	 some cases.  */
12823       if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12824 	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12825       else if (GET_CODE (setter) == SET
12826 	       && GET_CODE (SET_DEST (setter)) == SUBREG
12827 	       && SUBREG_REG (SET_DEST (setter)) == dest
12828 	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
12829 	       && subreg_lowpart_p (SET_DEST (setter)))
12830 	record_value_for_reg (dest, record_dead_insn,
12831 			      gen_lowpart (GET_MODE (dest),
12832 						       SET_SRC (setter)));
12833       else
12834 	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12835     }
12836   else if (MEM_P (dest)
12837 	   /* Ignore pushes, they clobber nothing.  */
12838 	   && ! push_operand (dest, GET_MODE (dest)))
12839     mem_last_set = DF_INSN_LUID (record_dead_insn);
12840 }
12841 
12842 /* Update the records of when each REG was most recently set or killed
12843    for the things done by INSN.  This is the last thing done in processing
12844    INSN in the combiner loop.
12845 
12846    We update reg_stat[], in particular fields last_set, last_set_value,
12847    last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12848    last_death, and also the similar information mem_last_set (which insn
12849    most recently modified memory) and last_call_luid (which insn was the
12850    most recent subroutine call).  */
12851 
12852 static void
12853 record_dead_and_set_regs (rtx_insn *insn)
12854 {
12855   rtx link;
12856   unsigned int i;
12857 
12858   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12859     {
12860       if (REG_NOTE_KIND (link) == REG_DEAD
12861 	  && REG_P (XEXP (link, 0)))
12862 	{
12863 	  unsigned int regno = REGNO (XEXP (link, 0));
12864 	  unsigned int endregno = END_REGNO (XEXP (link, 0));
12865 
12866 	  for (i = regno; i < endregno; i++)
12867 	    {
12868 	      reg_stat_type *rsp;
12869 
12870 	      rsp = &reg_stat[i];
12871 	      rsp->last_death = insn;
12872 	    }
12873 	}
12874       else if (REG_NOTE_KIND (link) == REG_INC)
12875 	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12876     }
12877 
12878   if (CALL_P (insn))
12879     {
12880       hard_reg_set_iterator hrsi;
12881       EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
12882 	{
12883 	  reg_stat_type *rsp;
12884 
12885 	  rsp = &reg_stat[i];
12886 	  rsp->last_set_invalid = 1;
12887 	  rsp->last_set = insn;
12888 	  rsp->last_set_value = 0;
12889 	  rsp->last_set_mode = VOIDmode;
12890 	  rsp->last_set_nonzero_bits = 0;
12891 	  rsp->last_set_sign_bit_copies = 0;
12892 	  rsp->last_death = 0;
12893 	  rsp->truncated_to_mode = VOIDmode;
12894 	}
12895 
12896       last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12897 
12898       /* We can't combine into a call pattern.  Remember, though, that
12899 	 the return value register is set at this LUID.  We could
12900 	 still replace a register with the return value from the
12901 	 wrong subroutine call!  */
12902       note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12903     }
12904   else
12905     note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12906 }
12907 
12908 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12909    register present in the SUBREG, so for each such SUBREG go back and
12910    adjust nonzero and sign bit information of the registers that are
12911    known to have some zero/sign bits set.
12912 
12913    This is needed because when combine blows the SUBREGs away, the
12914    information on zero/sign bits is lost and further combines can be
12915    missed because of that.  */
12916 
12917 static void
12918 record_promoted_value (rtx_insn *insn, rtx subreg)
12919 {
12920   struct insn_link *links;
12921   rtx set;
12922   unsigned int regno = REGNO (SUBREG_REG (subreg));
12923   machine_mode mode = GET_MODE (subreg);
12924 
12925   if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
12926     return;
12927 
12928   for (links = LOG_LINKS (insn); links;)
12929     {
12930       reg_stat_type *rsp;
12931 
12932       insn = links->insn;
12933       set = single_set (insn);
12934 
12935       if (! set || !REG_P (SET_DEST (set))
12936 	  || REGNO (SET_DEST (set)) != regno
12937 	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12938 	{
12939 	  links = links->next;
12940 	  continue;
12941 	}
12942 
12943       rsp = &reg_stat[regno];
12944       if (rsp->last_set == insn)
12945 	{
12946 	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
12947 	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12948 	}
12949 
12950       if (REG_P (SET_SRC (set)))
12951 	{
12952 	  regno = REGNO (SET_SRC (set));
12953 	  links = LOG_LINKS (insn);
12954 	}
12955       else
12956 	break;
12957     }
12958 }
12959 
12960 /* Check if X, a register, is known to contain a value already
12961    truncated to MODE.  In this case we can use a subreg to refer to
12962    the truncated value even though in the generic case we would need
12963    an explicit truncation.  */
12964 
12965 static bool
12966 reg_truncated_to_mode (machine_mode mode, const_rtx x)
12967 {
12968   reg_stat_type *rsp = &reg_stat[REGNO (x)];
12969   machine_mode truncated = rsp->truncated_to_mode;
12970 
12971   if (truncated == 0
12972       || rsp->truncation_label < label_tick_ebb_start)
12973     return false;
12974   if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12975     return true;
12976   if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
12977     return true;
12978   return false;
12979 }
12980 
12981 /* If X is a hard reg or a subreg record the mode that the register is
12982    accessed in.  For non-TRULY_NOOP_TRUNCATION targets we might be able
12983    to turn a truncate into a subreg using this information.  Return true
12984    if traversing X is complete.  */
12985 
12986 static bool
12987 record_truncated_value (rtx x)
12988 {
12989   machine_mode truncated_mode;
12990   reg_stat_type *rsp;
12991 
12992   if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12993     {
12994       machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12995       truncated_mode = GET_MODE (x);
12996 
12997       if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12998 	return true;
12999 
13000       if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13001 	return true;
13002 
13003       x = SUBREG_REG (x);
13004     }
13005   /* ??? For hard-regs we now record everything.  We might be able to
13006      optimize this using last_set_mode.  */
13007   else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13008     truncated_mode = GET_MODE (x);
13009   else
13010     return false;
13011 
13012   rsp = &reg_stat[REGNO (x)];
13013   if (rsp->truncated_to_mode == 0
13014       || rsp->truncation_label < label_tick_ebb_start
13015       || (GET_MODE_SIZE (truncated_mode)
13016 	  < GET_MODE_SIZE (rsp->truncated_to_mode)))
13017     {
13018       rsp->truncated_to_mode = truncated_mode;
13019       rsp->truncation_label = label_tick;
13020     }
13021 
13022   return true;
13023 }
13024 
13025 /* Callback for note_uses.  Find hardregs and subregs of pseudos and
13026    the modes they are used in.  This can help turning TRUNCATEs into
13027    SUBREGs.  */
13028 
13029 static void
13030 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13031 {
13032   subrtx_var_iterator::array_type array;
13033   FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13034     if (record_truncated_value (*iter))
13035       iter.skip_subrtxes ();
13036 }
13037 
13038 /* Scan X for promoted SUBREGs.  For each one found,
13039    note what it implies to the registers used in it.  */
13040 
13041 static void
13042 check_promoted_subreg (rtx_insn *insn, rtx x)
13043 {
13044   if (GET_CODE (x) == SUBREG
13045       && SUBREG_PROMOTED_VAR_P (x)
13046       && REG_P (SUBREG_REG (x)))
13047     record_promoted_value (insn, x);
13048   else
13049     {
13050       const char *format = GET_RTX_FORMAT (GET_CODE (x));
13051       int i, j;
13052 
13053       for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13054 	switch (format[i])
13055 	  {
13056 	  case 'e':
13057 	    check_promoted_subreg (insn, XEXP (x, i));
13058 	    break;
13059 	  case 'V':
13060 	  case 'E':
13061 	    if (XVEC (x, i) != 0)
13062 	      for (j = 0; j < XVECLEN (x, i); j++)
13063 		check_promoted_subreg (insn, XVECEXP (x, i, j));
13064 	    break;
13065 	  }
13066     }
13067 }
13068 
13069 /* Verify that all the registers and memory references mentioned in *LOC are
13070    still valid.  *LOC was part of a value set in INSN when label_tick was
13071    equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
13072    the invalid references with (clobber (const_int 0)) and return 1.  This
13073    replacement is useful because we often can get useful information about
13074    the form of a value (e.g., if it was produced by a shift that always
13075    produces -1 or 0) even though we don't know exactly what registers it
13076    was produced from.  */
13077 
13078 static int
13079 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13080 {
13081   rtx x = *loc;
13082   const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13083   int len = GET_RTX_LENGTH (GET_CODE (x));
13084   int i, j;
13085 
13086   if (REG_P (x))
13087     {
13088       unsigned int regno = REGNO (x);
13089       unsigned int endregno = END_REGNO (x);
13090       unsigned int j;
13091 
13092       for (j = regno; j < endregno; j++)
13093 	{
13094 	  reg_stat_type *rsp = &reg_stat[j];
13095 	  if (rsp->last_set_invalid
13096 	      /* If this is a pseudo-register that was only set once and not
13097 		 live at the beginning of the function, it is always valid.  */
13098 	      || (! (regno >= FIRST_PSEUDO_REGISTER
13099 		     && regno < reg_n_sets_max
13100 		     && REG_N_SETS (regno) == 1
13101 		     && (!REGNO_REG_SET_P
13102 			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13103 			  regno)))
13104 		  && rsp->last_set_label > tick))
13105 	  {
13106 	    if (replace)
13107 	      *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13108 	    return replace;
13109 	  }
13110 	}
13111 
13112       return 1;
13113     }
13114   /* If this is a memory reference, make sure that there were no stores after
13115      it that might have clobbered the value.  We don't have alias info, so we
13116      assume any store invalidates it.  Moreover, we only have local UIDs, so
13117      we also assume that there were stores in the intervening basic blocks.  */
13118   else if (MEM_P (x) && !MEM_READONLY_P (x)
13119 	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13120     {
13121       if (replace)
13122 	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13123       return replace;
13124     }
13125 
13126   for (i = 0; i < len; i++)
13127     {
13128       if (fmt[i] == 'e')
13129 	{
13130 	  /* Check for identical subexpressions.  If x contains
13131 	     identical subexpressions we only have to traverse one of
13132 	     them.  */
13133 	  if (i == 1 && ARITHMETIC_P (x))
13134 	    {
13135 	      /* Note that at this point x0 has already been checked
13136 		 and found valid.  */
13137 	      rtx x0 = XEXP (x, 0);
13138 	      rtx x1 = XEXP (x, 1);
13139 
13140 	      /* If x0 and x1 are identical then x is also valid.  */
13141 	      if (x0 == x1)
13142 		return 1;
13143 
13144 	      /* If x1 is identical to a subexpression of x0 then
13145 		 while checking x0, x1 has already been checked.  Thus
13146 		 it is valid and so is x.  */
13147 	      if (ARITHMETIC_P (x0)
13148 		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13149 		return 1;
13150 
13151 	      /* If x0 is identical to a subexpression of x1 then x is
13152 		 valid iff the rest of x1 is valid.  */
13153 	      if (ARITHMETIC_P (x1)
13154 		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13155 		return
13156 		  get_last_value_validate (&XEXP (x1,
13157 						  x0 == XEXP (x1, 0) ? 1 : 0),
13158 					   insn, tick, replace);
13159 	    }
13160 
13161 	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
13162 				       replace) == 0)
13163 	    return 0;
13164 	}
13165       else if (fmt[i] == 'E')
13166 	for (j = 0; j < XVECLEN (x, i); j++)
13167 	  if (get_last_value_validate (&XVECEXP (x, i, j),
13168 				       insn, tick, replace) == 0)
13169 	    return 0;
13170     }
13171 
13172   /* If we haven't found a reason for it to be invalid, it is valid.  */
13173   return 1;
13174 }
13175 
13176 /* Get the last value assigned to X, if known.  Some registers
13177    in the value may be replaced with (clobber (const_int 0)) if their value
13178    is no longer known reliably.  */
13179 
13180 static rtx
13181 get_last_value (const_rtx x)
13182 {
13183   unsigned int regno;
13184   rtx value;
13185   reg_stat_type *rsp;
13186 
13187   /* If this is a non-paradoxical SUBREG, get the value of its operand and
13188      then convert it to the desired mode.  If this is a paradoxical SUBREG,
13189      we cannot predict what values the "extra" bits might have.  */
13190   if (GET_CODE (x) == SUBREG
13191       && subreg_lowpart_p (x)
13192       && !paradoxical_subreg_p (x)
13193       && (value = get_last_value (SUBREG_REG (x))) != 0)
13194     return gen_lowpart (GET_MODE (x), value);
13195 
13196   if (!REG_P (x))
13197     return 0;
13198 
13199   regno = REGNO (x);
13200   rsp = &reg_stat[regno];
13201   value = rsp->last_set_value;
13202 
13203   /* If we don't have a value, or if it isn't for this basic block and
13204      it's either a hard register, set more than once, or it's live
13205      at the beginning of the function, return 0.
13206 
13207      Because if it's not live at the beginning of the function then the reg
13208      is always set before being used (is never used without being set).
13209      And, if it's set only once, and it's always set before use, then all
13210      uses must have the same last value, even if it's not from this basic
13211      block.  */
13212 
13213   if (value == 0
13214       || (rsp->last_set_label < label_tick_ebb_start
13215 	  && (regno < FIRST_PSEUDO_REGISTER
13216 	      || regno >= reg_n_sets_max
13217 	      || REG_N_SETS (regno) != 1
13218 	      || REGNO_REG_SET_P
13219 		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13220     return 0;
13221 
13222   /* If the value was set in a later insn than the ones we are processing,
13223      we can't use it even if the register was only set once.  */
13224   if (rsp->last_set_label == label_tick
13225       && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13226     return 0;
13227 
13228   /* If fewer bits were set than what we are asked for now, we cannot use
13229      the value.  */
13230   if (GET_MODE_PRECISION (rsp->last_set_mode)
13231       < GET_MODE_PRECISION (GET_MODE (x)))
13232     return 0;
13233 
13234   /* If the value has all its registers valid, return it.  */
13235   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13236     return value;
13237 
13238   /* Otherwise, make a copy and replace any invalid register with
13239      (clobber (const_int 0)).  If that fails for some reason, return 0.  */
13240 
13241   value = copy_rtx (value);
13242   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13243     return value;
13244 
13245   return 0;
13246 }
13247 
13248 /* Return nonzero if expression X refers to a REG or to memory
13249    that is set in an instruction more recent than FROM_LUID.  */
13250 
13251 static int
13252 use_crosses_set_p (const_rtx x, int from_luid)
13253 {
13254   const char *fmt;
13255   int i;
13256   enum rtx_code code = GET_CODE (x);
13257 
13258   if (code == REG)
13259     {
13260       unsigned int regno = REGNO (x);
13261       unsigned endreg = END_REGNO (x);
13262 
13263 #ifdef PUSH_ROUNDING
13264       /* Don't allow uses of the stack pointer to be moved,
13265 	 because we don't know whether the move crosses a push insn.  */
13266       if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13267 	return 1;
13268 #endif
13269       for (; regno < endreg; regno++)
13270 	{
13271 	  reg_stat_type *rsp = &reg_stat[regno];
13272 	  if (rsp->last_set
13273 	      && rsp->last_set_label == label_tick
13274 	      && DF_INSN_LUID (rsp->last_set) > from_luid)
13275 	    return 1;
13276 	}
13277       return 0;
13278     }
13279 
13280   if (code == MEM && mem_last_set > from_luid)
13281     return 1;
13282 
13283   fmt = GET_RTX_FORMAT (code);
13284 
13285   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13286     {
13287       if (fmt[i] == 'E')
13288 	{
13289 	  int j;
13290 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13291 	    if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13292 	      return 1;
13293 	}
13294       else if (fmt[i] == 'e'
13295 	       && use_crosses_set_p (XEXP (x, i), from_luid))
13296 	return 1;
13297     }
13298   return 0;
13299 }
13300 
13301 /* Define three variables used for communication between the following
13302    routines.  */
13303 
13304 static unsigned int reg_dead_regno, reg_dead_endregno;
13305 static int reg_dead_flag;
13306 
13307 /* Function called via note_stores from reg_dead_at_p.
13308 
13309    If DEST is within [reg_dead_regno, reg_dead_endregno), set
13310    reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
13311 
13312 static void
13313 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13314 {
13315   unsigned int regno, endregno;
13316 
13317   if (!REG_P (dest))
13318     return;
13319 
13320   regno = REGNO (dest);
13321   endregno = END_REGNO (dest);
13322   if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13323     reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13324 }
13325 
13326 /* Return nonzero if REG is known to be dead at INSN.
13327 
13328    We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
13329    referencing REG, it is dead.  If we hit a SET referencing REG, it is
13330    live.  Otherwise, see if it is live or dead at the start of the basic
13331    block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
13332    must be assumed to be always live.  */
13333 
13334 static int
13335 reg_dead_at_p (rtx reg, rtx_insn *insn)
13336 {
13337   basic_block block;
13338   unsigned int i;
13339 
13340   /* Set variables for reg_dead_at_p_1.  */
13341   reg_dead_regno = REGNO (reg);
13342   reg_dead_endregno = END_REGNO (reg);
13343 
13344   reg_dead_flag = 0;
13345 
13346   /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
13347      we allow the machine description to decide whether use-and-clobber
13348      patterns are OK.  */
13349   if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13350     {
13351       for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13352 	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13353 	  return 0;
13354     }
13355 
13356   /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13357      beginning of basic block.  */
13358   block = BLOCK_FOR_INSN (insn);
13359   for (;;)
13360     {
13361       if (INSN_P (insn))
13362         {
13363 	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13364 	    return 1;
13365 
13366 	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13367 	  if (reg_dead_flag)
13368 	    return reg_dead_flag == 1 ? 1 : 0;
13369 
13370 	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13371 	    return 1;
13372         }
13373 
13374       if (insn == BB_HEAD (block))
13375 	break;
13376 
13377       insn = PREV_INSN (insn);
13378     }
13379 
13380   /* Look at live-in sets for the basic block that we were in.  */
13381   for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13382     if (REGNO_REG_SET_P (df_get_live_in (block), i))
13383       return 0;
13384 
13385   return 1;
13386 }
13387 
13388 /* Note hard registers in X that are used.  */
13389 
13390 static void
13391 mark_used_regs_combine (rtx x)
13392 {
13393   RTX_CODE code = GET_CODE (x);
13394   unsigned int regno;
13395   int i;
13396 
13397   switch (code)
13398     {
13399     case LABEL_REF:
13400     case SYMBOL_REF:
13401     case CONST:
13402     CASE_CONST_ANY:
13403     case PC:
13404     case ADDR_VEC:
13405     case ADDR_DIFF_VEC:
13406     case ASM_INPUT:
13407     /* CC0 must die in the insn after it is set, so we don't need to take
13408        special note of it here.  */
13409     case CC0:
13410       return;
13411 
13412     case CLOBBER:
13413       /* If we are clobbering a MEM, mark any hard registers inside the
13414 	 address as used.  */
13415       if (MEM_P (XEXP (x, 0)))
13416 	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13417       return;
13418 
13419     case REG:
13420       regno = REGNO (x);
13421       /* A hard reg in a wide mode may really be multiple registers.
13422 	 If so, mark all of them just like the first.  */
13423       if (regno < FIRST_PSEUDO_REGISTER)
13424 	{
13425 	  /* None of this applies to the stack, frame or arg pointers.  */
13426 	  if (regno == STACK_POINTER_REGNUM
13427 	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13428 		  && regno == HARD_FRAME_POINTER_REGNUM)
13429 	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13430 		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13431 	      || regno == FRAME_POINTER_REGNUM)
13432 	    return;
13433 
13434 	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13435 	}
13436       return;
13437 
13438     case SET:
13439       {
13440 	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13441 	   the address.  */
13442 	rtx testreg = SET_DEST (x);
13443 
13444 	while (GET_CODE (testreg) == SUBREG
13445 	       || GET_CODE (testreg) == ZERO_EXTRACT
13446 	       || GET_CODE (testreg) == STRICT_LOW_PART)
13447 	  testreg = XEXP (testreg, 0);
13448 
13449 	if (MEM_P (testreg))
13450 	  mark_used_regs_combine (XEXP (testreg, 0));
13451 
13452 	mark_used_regs_combine (SET_SRC (x));
13453       }
13454       return;
13455 
13456     default:
13457       break;
13458     }
13459 
13460   /* Recursively scan the operands of this expression.  */
13461 
13462   {
13463     const char *fmt = GET_RTX_FORMAT (code);
13464 
13465     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13466       {
13467 	if (fmt[i] == 'e')
13468 	  mark_used_regs_combine (XEXP (x, i));
13469 	else if (fmt[i] == 'E')
13470 	  {
13471 	    int j;
13472 
13473 	    for (j = 0; j < XVECLEN (x, i); j++)
13474 	      mark_used_regs_combine (XVECEXP (x, i, j));
13475 	  }
13476       }
13477   }
13478 }
13479 
13480 /* Remove register number REGNO from the dead registers list of INSN.
13481 
13482    Return the note used to record the death, if there was one.  */
13483 
13484 rtx
13485 remove_death (unsigned int regno, rtx_insn *insn)
13486 {
13487   rtx note = find_regno_note (insn, REG_DEAD, regno);
13488 
13489   if (note)
13490     remove_note (insn, note);
13491 
13492   return note;
13493 }
13494 
13495 /* For each register (hardware or pseudo) used within expression X, if its
13496    death is in an instruction with luid between FROM_LUID (inclusive) and
13497    that of TO_INSN (exclusive), put a REG_DEAD note for that register in the
13498    list headed by PNOTES.
13499 
13500    That said, don't move registers killed by maybe_kill_insn.
13501 
13502    This is done when X is being merged by combination into TO_INSN.  These
13503    notes will then be distributed as needed.  */
13504 
13505 static void
13506 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13507 	     rtx *pnotes)
13508 {
13509   const char *fmt;
13510   int len, i;
13511   enum rtx_code code = GET_CODE (x);
13512 
13513   if (code == REG)
13514     {
13515       unsigned int regno = REGNO (x);
13516       rtx_insn *where_dead = reg_stat[regno].last_death;
13517 
13518       /* Don't move the register if it gets killed in between from and to.  */
13519       if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13520 	  && ! reg_referenced_p (x, maybe_kill_insn))
13521 	return;
13522 
13523       if (where_dead
13524 	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13525 	  && DF_INSN_LUID (where_dead) >= from_luid
13526 	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13527 	{
13528 	  rtx note = remove_death (regno, where_dead);
13529 
13530 	  /* It is possible for the call above to return 0.  This can occur
13531 	     when last_death points to I2 or I1 that we combined with.
13532 	     In that case make a new note.
13533 
13534 	     We must also check for the case where X is a hard register
13535 	     and NOTE is a death note for a range of hard registers
13536 	     including X.  In that case, we must put REG_DEAD notes for
13537 	     the remaining registers in place of NOTE.  */
13538 
13539 	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13540 	      && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13541 		  > GET_MODE_SIZE (GET_MODE (x))))
13542 	    {
13543 	      unsigned int deadregno = REGNO (XEXP (note, 0));
13544 	      unsigned int deadend = END_REGNO (XEXP (note, 0));
13545 	      unsigned int ourend = END_REGNO (x);
13546 	      unsigned int i;
13547 
13548 	      for (i = deadregno; i < deadend; i++)
13549 		if (i < regno || i >= ourend)
13550 		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13551 	    }
13552 
13553 	  /* If we didn't find any note, or if we found a REG_DEAD note that
13554 	     covers only part of the given reg, and we have a multi-reg hard
13555 	     register, then to be safe we must check for REG_DEAD notes
13556 	     for each register other than the first.  They could have
13557 	     their own REG_DEAD notes lying around.  */
13558 	  else if ((note == 0
13559 		    || (note != 0
13560 			&& (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13561 			    < GET_MODE_SIZE (GET_MODE (x)))))
13562 		   && regno < FIRST_PSEUDO_REGISTER
13563 		   && REG_NREGS (x) > 1)
13564 	    {
13565 	      unsigned int ourend = END_REGNO (x);
13566 	      unsigned int i, offset;
13567 	      rtx oldnotes = 0;
13568 
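	      /* Start past the registers covered by the note (just past the
		 first register if there was no note) and handle each of the
		 remaining hard registers separately.  */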
13569 	      if (note)
13570 		offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
13571 	      else
13572 		offset = 1;
13573 
13574 	      for (i = regno + offset; i < ourend; i++)
13575 		move_deaths (regno_reg_rtx[i],
13576 			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
13577 	    }
13578 
13579 	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13580 	    {
13581 	      XEXP (note, 1) = *pnotes;
13582 	      *pnotes = note;
13583 	    }
13584 	  else
13585 	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13586 	}
13587 
13588       return;
13589     }
13590 
13591   else if (GET_CODE (x) == SET)
13592     {
13593       rtx dest = SET_DEST (x);
13594 
13595       move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13596 
13597       /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13598 	 that accesses one word of a multi-word item, some
13599 	 piece of every register in the expression is used by
13600 	 this insn, so remove any old death.  */
13601       /* ??? So why do we test for equality of the sizes?  */
13602 
13603       if (GET_CODE (dest) == ZERO_EXTRACT
13604 	  || GET_CODE (dest) == STRICT_LOW_PART
13605 	  || (GET_CODE (dest) == SUBREG
13606 	      && (((GET_MODE_SIZE (GET_MODE (dest))
13607 		    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13608 		  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13609 		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13610 	{
13611 	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13612 	  return;
13613 	}
13614 
13615       /* If this is some other SUBREG, we know it replaces the entire
13616 	 value, so use that as the destination.  */
13617       if (GET_CODE (dest) == SUBREG)
13618 	dest = SUBREG_REG (dest);
13619 
13620       /* If this is a MEM, adjust deaths of anything used in the address.
13621 	 For a REG (the only other possibility), the entire value is
13622 	 being replaced so the old value is not used in this insn.  */
13623 
13624       if (MEM_P (dest))
13625 	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13626 		     to_insn, pnotes);
13627       return;
13628     }
13629 
13630   else if (GET_CODE (x) == CLOBBER)
13631     return;
13632 
13633   len = GET_RTX_LENGTH (code);
13634   fmt = GET_RTX_FORMAT (code);
13635 
13636   for (i = 0; i < len; i++)
13637     {
13638       if (fmt[i] == 'E')
13639 	{
13640 	  int j;
13641 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13642 	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13643 			 to_insn, pnotes);
13644 	}
13645       else if (fmt[i] == 'e')
13646 	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13647     }
13648 }
13649 
13650 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13651    pattern of an insn.  X must be a REG.  */
13652 
13653 static int
13654 reg_bitfield_target_p (rtx x, rtx body)
13655 {
13656   int i;
13657 
13658   if (GET_CODE (body) == SET)
13659     {
13660       rtx dest = SET_DEST (body);
13661       rtx target;
13662       unsigned int regno, tregno, endregno, endtregno;
13663 
13664       if (GET_CODE (dest) == ZERO_EXTRACT)
13665 	target = XEXP (dest, 0);
13666       else if (GET_CODE (dest) == STRICT_LOW_PART)
13667 	target = SUBREG_REG (XEXP (dest, 0));
13668       else
13669 	return 0;
13670 
13671       if (GET_CODE (target) == SUBREG)
13672 	target = SUBREG_REG (target);
13673 
13674       if (!REG_P (target))
13675 	return 0;
13676 
13677       tregno = REGNO (target), regno = REGNO (x);
13678       if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13679 	return target == x;
13680 
13681       endtregno = end_hard_regno (GET_MODE (target), tregno);
13682       endregno = end_hard_regno (GET_MODE (x), regno);
13683 
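      /* The ranges [TREGNO, ENDTREGNO) and [REGNO, ENDREGNO) overlap exactly
	 when each one starts before the other ends.  */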
13684       return endregno > tregno && regno < endtregno;
13685     }
13686 
13687   else if (GET_CODE (body) == PARALLEL)
13688     for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13689       if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13690 	return 1;
13691 
13692   return 0;
13693 }
13694 
13695 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13696    as appropriate.  I3 and I2 are the insns resulting from the combination
13697    insns including FROM (I2 may be zero).
13698 
13699    ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13700    not need REG_DEAD notes because they are being substituted for.  This
13701    saves searching in the most common cases.
13702 
13703    Each note in the list is either ignored or placed on some insns, depending
13704    on the type of note.  */
13705 
13706 static void
13707 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
13708 		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
13709 {
13710   rtx note, next_note;
13711   rtx tem_note;
13712   rtx_insn *tem_insn;
13713 
13714   for (note = notes; note; note = next_note)
13715     {
13716       rtx_insn *place = 0, *place2 = 0;
13717 
13718       next_note = XEXP (note, 1);
13719       switch (REG_NOTE_KIND (note))
13720 	{
13721 	case REG_BR_PROB:
13722 	case REG_BR_PRED:
13723 	  /* Doesn't matter much where we put this, as long as it's somewhere.
13724 	     It is preferable to keep these notes on branches, which is most
13725 	     likely to be i3.  */
13726 	  place = i3;
13727 	  break;
13728 
13729 	case REG_NON_LOCAL_GOTO:
13730 	  if (JUMP_P (i3))
13731 	    place = i3;
13732 	  else
13733 	    {
13734 	      gcc_assert (i2 && JUMP_P (i2));
13735 	      place = i2;
13736 	    }
13737 	  break;
13738 
13739 	case REG_EH_REGION:
13740 	  /* These notes must remain with the call or trapping instruction.  */
13741 	  if (CALL_P (i3))
13742 	    place = i3;
13743 	  else if (i2 && CALL_P (i2))
13744 	    place = i2;
13745 	  else
13746 	    {
13747 	      gcc_assert (cfun->can_throw_non_call_exceptions);
13748 	      if (may_trap_p (i3))
13749 		place = i3;
13750 	      else if (i2 && may_trap_p (i2))
13751 		place = i2;
13752 	      /* ??? Otherwise assume we've combined things such that we
13753 		 can now prove that the instructions can't trap.  Drop the
13754 		 note in this case.  */
13755 	    }
13756 	  break;
13757 
13758 	case REG_ARGS_SIZE:
13759 	  /* ??? How to split this between i3 and i1?  Assume i3 contains the
13760 	     entire adjustment; assert i3 contains at least some of it.  */
13761 	  if (!noop_move_p (i3))
13762 	    {
13763 	      int old_size, args_size = INTVAL (XEXP (note, 0));
13764 	      /* fixup_args_size_notes looks at REG_NORETURN note,
13765 		 so ensure the note is placed there first.  */
13766 	      if (CALL_P (i3))
13767 		{
13768 		  rtx *np;
13769 		  for (np = &next_note; *np; np = &XEXP (*np, 1))
13770 		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
13771 		      {
13772 			rtx n = *np;
13773 			*np = XEXP (n, 1);
13774 			XEXP (n, 1) = REG_NOTES (i3);
13775 			REG_NOTES (i3) = n;
13776 			break;
13777 		      }
13778 		}
13779 	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
13780 	      /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
13781 		 note to all noreturn calls; allow that here.  */
13782 	      gcc_assert (old_size != args_size
13783 			  || (CALL_P (i3)
13784 			      && !ACCUMULATE_OUTGOING_ARGS
13785 			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
13786 	    }
13787 	  break;
13788 
13789 	case REG_NORETURN:
13790 	case REG_SETJMP:
13791 	case REG_TM:
13792 	case REG_CALL_DECL:
13793 	  /* These notes must remain with the call.  It should not be
13794 	     possible for both I2 and I3 to be a call.  */
13795 	  if (CALL_P (i3))
13796 	    place = i3;
13797 	  else
13798 	    {
13799 	      gcc_assert (i2 && CALL_P (i2));
13800 	      place = i2;
13801 	    }
13802 	  break;
13803 
13804 	case REG_UNUSED:
13805 	  /* Any clobbers for i3 may still exist, and so we must process
13806 	     REG_UNUSED notes from that insn.
13807 
13808 	     Any clobbers from i2 or i1 can only exist if they were added by
13809 	     recog_for_combine.  In that case, recog_for_combine created the
13810 	     necessary REG_UNUSED notes.  Trying to keep any original
13811 	     REG_UNUSED notes from these insns can cause incorrect output
13812 	     if it is for the same register as the original i3 dest.
13813 	     In that case, we will notice that the register is set in i3,
13814 	     and then add a REG_UNUSED note for the destination of i3, which
13815 	     is wrong.  However, it is possible to have REG_UNUSED notes from
13816 	     i2 or i1 for registers which were both used and clobbered, so
13817 	     we keep notes from i2 or i1 if they will turn into REG_DEAD
13818 	     notes.  */
13819 
13820 	  /* If this register is set or clobbered in I3, put the note there
13821 	     unless there is one already.  */
13822 	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13823 	    {
13824 	      if (from_insn != i3)
13825 		break;
13826 
13827 	      if (! (REG_P (XEXP (note, 0))
13828 		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13829 		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13830 		place = i3;
13831 	    }
13832 	  /* Otherwise, if this register is used by I3, then this register
13833 	     now dies here, so we must put a REG_DEAD note here unless there
13834 	     is one already.  */
13835 	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13836 		   && ! (REG_P (XEXP (note, 0))
13837 			 ? find_regno_note (i3, REG_DEAD,
13838 					    REGNO (XEXP (note, 0)))
13839 			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13840 	    {
13841 	      PUT_REG_NOTE_KIND (note, REG_DEAD);
13842 	      place = i3;
13843 	    }
13844 	  break;
13845 
13846 	case REG_EQUAL:
13847 	case REG_EQUIV:
13848 	case REG_NOALIAS:
13849 	  /* These notes say something about results of an insn.  We can
13850 	     only support them if they used to be on I3 in which case they
13851 	     remain on I3.  Otherwise they are ignored.
13852 
13853 	     If the note refers to an expression that is not a constant, we
13854 	     must also ignore the note since we cannot tell whether the
13855 	     equivalence is still true.  It might be possible to do
13856 	     slightly better than this (we only have a problem if I2DEST
13857 	     or I1DEST is present in the expression), but it doesn't
13858 	     seem worth the trouble.  */
13859 
13860 	  if (from_insn == i3
13861 	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13862 	    place = i3;
13863 	  break;
13864 
13865 	case REG_INC:
13866 	  /* These notes say something about how a register is used.  They must
13867 	     be present on any use of the register in I2 or I3.  */
13868 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13869 	    place = i3;
13870 
13871 	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13872 	    {
13873 	      if (place)
13874 		place2 = i2;
13875 	      else
13876 		place = i2;
13877 	    }
13878 	  break;
13879 
13880 	case REG_LABEL_TARGET:
13881 	case REG_LABEL_OPERAND:
13882 	  /* This can show up in several ways -- either directly in the
13883 	     pattern, or hidden off in the constant pool with (or without?)
13884 	     a REG_EQUAL note.  */
13885 	  /* ??? Ignore the without-reg_equal-note problem for now.  */
13886 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13887 	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13888 		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
13889 		  && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0)))
13890 	    place = i3;
13891 
13892 	  if (i2
13893 	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13894 		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13895 		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
13896 		      && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0))))
13897 	    {
13898 	      if (place)
13899 		place2 = i2;
13900 	      else
13901 		place = i2;
13902 	    }
13903 
13904 	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13905 	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13906 	     there.  */
13907 	  if (place && JUMP_P (place)
13908 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13909 	      && (JUMP_LABEL (place) == NULL
13910 		  || JUMP_LABEL (place) == XEXP (note, 0)))
13911 	    {
13912 	      rtx label = JUMP_LABEL (place);
13913 
13914 	      if (!label)
13915 		JUMP_LABEL (place) = XEXP (note, 0);
13916 	      else if (LABEL_P (label))
13917 		LABEL_NUSES (label)--;
13918 	    }
13919 
13920 	  if (place2 && JUMP_P (place2)
13921 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13922 	      && (JUMP_LABEL (place2) == NULL
13923 		  || JUMP_LABEL (place2) == XEXP (note, 0)))
13924 	    {
13925 	      rtx label = JUMP_LABEL (place2);
13926 
13927 	      if (!label)
13928 		JUMP_LABEL (place2) = XEXP (note, 0);
13929 	      else if (LABEL_P (label))
13930 		LABEL_NUSES (label)--;
13931 	      place2 = 0;
13932 	    }
13933 	  break;
13934 
13935 	case REG_NONNEG:
13936 	  /* This note says something about the value of a register prior
13937 	     to the execution of an insn.  It is too much trouble to see
13938 	     if the note is still correct in all situations.  It is better
13939 	     to simply delete it.  */
13940 	  break;
13941 
13942 	case REG_DEAD:
13943 	  /* If we replaced the right hand side of FROM_INSN with a
13944 	     REG_EQUAL note, the original use of the dying register
13945 	     will not have been combined into I3 and I2.  In such cases,
13946 	     FROM_INSN is guaranteed to be the first of the combined
13947 	     instructions, so we simply need to search back before
13948 	     FROM_INSN for the previous use or set of this register,
13949 	     then alter the notes there appropriately.
13950 
13951 	     If the register is used as an input in I3, it dies there.
13952 	     Similarly for I2, if it is nonzero and adjacent to I3.
13953 
13954 	     If the register is not used as an input in either I3 or I2
13955 	     and it is not one of the registers we were supposed to eliminate,
13956 	     there are two possibilities.  We might have a non-adjacent I2
13957 	     or we might have somehow eliminated an additional register
13958 	     from a computation.  For example, we might have had A & B where
13959 	     we discover that B will always be zero.  In this case we will
13960 	     eliminate the reference to A.
13961 
13962 	     In both cases, we must search to see if we can find a previous
13963 	     use of A and put the death note there.  */
13964 
13965 	  if (from_insn
13966 	      && from_insn == i2mod
13967 	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13968 	    tem_insn = from_insn;
13969 	  else
13970 	    {
13971 	      if (from_insn
13972 		  && CALL_P (from_insn)
13973 		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13974 		place = from_insn;
13975 	      else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
13976 		{
13977 		  /* If the new I2 sets the same register that is marked
13978 		     dead in the note, we do not in general know where to
13979 		     put the note.  One important case we _can_ handle is
13980 		     when the note comes from I3.  */
13981 		  if (from_insn == i3)
13982 		    place = i3;
13983 		  else
13984 		    break;
13985 		}
13986 	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13987 		place = i3;
13988 	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13989 		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13990 		place = i2;
13991 	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13992 			&& !(i2mod
13993 			     && reg_overlap_mentioned_p (XEXP (note, 0),
13994 							 i2mod_old_rhs)))
13995 		       || rtx_equal_p (XEXP (note, 0), elim_i1)
13996 		       || rtx_equal_p (XEXP (note, 0), elim_i0))
13997 		break;
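	      /* Otherwise start the backward search just before I3.  */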
13998 	      tem_insn = i3;
13999 	    }
14000 
14001 	  if (place == 0)
14002 	    {
14003 	      basic_block bb = this_basic_block;
14004 
14005 	      for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14006 		{
14007 		  if (!NONDEBUG_INSN_P (tem_insn))
14008 		    {
14009 		      if (tem_insn == BB_HEAD (bb))
14010 			break;
14011 		      continue;
14012 		    }
14013 
14014 		  /* If the register is being set at TEM_INSN, see if that is all
14015 		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
14016 		     into a REG_UNUSED note instead.  Don't delete sets to
14017 		     global register vars.  */
14018 		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14019 		       || !global_regs[REGNO (XEXP (note, 0))])
14020 		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14021 		    {
14022 		      rtx set = single_set (tem_insn);
14023 		      rtx inner_dest = 0;
14024 		      rtx_insn *cc0_setter = NULL;
14025 
14026 		      if (set != 0)
14027 			for (inner_dest = SET_DEST (set);
14028 			     (GET_CODE (inner_dest) == STRICT_LOW_PART
14029 			      || GET_CODE (inner_dest) == SUBREG
14030 			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
14031 			     inner_dest = XEXP (inner_dest, 0))
14032 			  ;
14033 
14034 		      /* Verify that it was the set, and not a clobber that
14035 			 modified the register.
14036 
14037 			 CC0 targets must be careful to maintain setter/user
14038 			 pairs.  If we cannot delete the setter due to side
14039 			 effects, mark the user with an UNUSED note instead
14040 			 of deleting it.  */
14041 
14042 		      if (set != 0 && ! side_effects_p (SET_SRC (set))
14043 			  && rtx_equal_p (XEXP (note, 0), inner_dest)
14044 			  && (!HAVE_cc0
14045 			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14046 				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14047 				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14048 			{
14049 			  /* Move the notes and links of TEM_INSN elsewhere.
14050 			     This might delete other dead insns recursively.
14051 			     First set the pattern to something that won't use
14052 			     any register.  */
14053 			  rtx old_notes = REG_NOTES (tem_insn);
14054 
14055 			  PATTERN (tem_insn) = pc_rtx;
14056 			  REG_NOTES (tem_insn) = NULL;
14057 
14058 			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14059 					    NULL_RTX, NULL_RTX, NULL_RTX);
14060 			  distribute_links (LOG_LINKS (tem_insn));
14061 
14062 			  SET_INSN_DELETED (tem_insn);
14063 			  if (tem_insn == i2)
14064 			    i2 = NULL;
14065 
14066 			  /* Delete the setter too.  */
14067 			  if (cc0_setter)
14068 			    {
14069 			      PATTERN (cc0_setter) = pc_rtx;
14070 			      old_notes = REG_NOTES (cc0_setter);
14071 			      REG_NOTES (cc0_setter) = NULL;
14072 
14073 			      distribute_notes (old_notes, cc0_setter,
14074 						cc0_setter, NULL,
14075 						NULL_RTX, NULL_RTX, NULL_RTX);
14076 			      distribute_links (LOG_LINKS (cc0_setter));
14077 
14078 			      SET_INSN_DELETED (cc0_setter);
14079 			      if (cc0_setter == i2)
14080 				i2 = NULL;
14081 			    }
14082 			}
14083 		      else
14084 			{
14085 			  PUT_REG_NOTE_KIND (note, REG_UNUSED);
14086 
14087 			  /*  If there isn't already a REG_UNUSED note, put one
14088 			      here.  Do not place a REG_DEAD note, even if
14089 			      the register is also used here; that would not
14090 			      match the algorithm used in lifetime analysis
14091 			      and can cause the consistency check in the
14092 			      scheduler to fail.  */
14093 			  if (! find_regno_note (tem_insn, REG_UNUSED,
14094 						 REGNO (XEXP (note, 0))))
14095 			    place = tem_insn;
14096 			  break;
14097 			}
14098 		    }
14099 		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14100 			   || (CALL_P (tem_insn)
14101 			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14102 		    {
14103 		      place = tem_insn;
14104 
14105 		      /* If we are doing a 3->2 combination, and we have a
14106 			 register which formerly died in i3 and was not used
14107 			 by i2, which now no longer dies in i3 and is used in
14108 			 i2 but does not die in i2, and place is between i2
14109 			 and i3, then we may need to move a link from place to
14110 			 i2.  */
14111 		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14112 			  && from_insn
14113 			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14114 			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14115 			{
14116 			  struct insn_link *links = LOG_LINKS (place);
14117 			  LOG_LINKS (place) = NULL;
14118 			  distribute_links (links);
14119 			}
14120 		      break;
14121 		    }
14122 
14123 		  if (tem_insn == BB_HEAD (bb))
14124 		    break;
14125 		}
14126 
14127 	    }
14128 
14129 	  /* If the register is set or already dead at PLACE, we needn't do
14130 	     anything with this note if it is still a REG_DEAD note.
14131 	     We check here if it is set at all, not if is it totally replaced,
14132 	     which is what `dead_or_set_p' checks, so also check for it being
14133 	     set partially.  */
14134 
14135 	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
14136 	    {
14137 	      unsigned int regno = REGNO (XEXP (note, 0));
14138 	      reg_stat_type *rsp = &reg_stat[regno];
14139 
14140 	      if (dead_or_set_p (place, XEXP (note, 0))
14141 		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14142 		{
14143 		  /* Unless the register previously died in PLACE, clear
14144 		     last_death.  [I no longer understand why this is
14145 		     being done.] */
14146 		  if (rsp->last_death != place)
14147 		    rsp->last_death = 0;
14148 		  place = 0;
14149 		}
14150 	      else
14151 		rsp->last_death = place;
14152 
14153 	      /* If this is a death note for a hard reg that is occupying
14154 		 multiple registers, ensure that we are still using all
14155 		 parts of the object.  If we find a piece of the object
14156 		 that is unused, we must arrange for an appropriate REG_DEAD
14157 		 note to be added for it.  However, we can't just emit a USE
14158 		 and tag the note to it, since the register might actually
14159 		 be dead; so we recurse, and the recursive call then finds
14160 		 the previous insn that used this register.  */
14161 
14162 	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
14163 		{
14164 		  unsigned int endregno = END_REGNO (XEXP (note, 0));
14165 		  bool all_used = true;
14166 		  unsigned int i;
14167 
14168 		  for (i = regno; i < endregno; i++)
14169 		    if ((! refers_to_regno_p (i, PATTERN (place))
14170 			 && ! find_regno_fusage (place, USE, i))
14171 			|| dead_or_set_regno_p (place, i))
14172 		      {
14173 			all_used = false;
14174 			break;
14175 		      }
14176 
14177 		  if (! all_used)
14178 		    {
14179 		      /* Put only REG_DEAD notes for pieces that are
14180 			 not already dead or set.  */
14181 
14182 		      for (i = regno; i < endregno;
14183 			   i += hard_regno_nregs[i][reg_raw_mode[i]])
14184 			{
14185 			  rtx piece = regno_reg_rtx[i];
14186 			  basic_block bb = this_basic_block;
14187 
14188 			  if (! dead_or_set_p (place, piece)
14189 			      && ! reg_bitfield_target_p (piece,
14190 							  PATTERN (place)))
14191 			    {
14192 			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
14193 							     NULL_RTX);
14194 
14195 			      distribute_notes (new_note, place, place,
14196 						NULL, NULL_RTX, NULL_RTX,
14197 						NULL_RTX);
14198 			    }
14199 			  else if (! refers_to_regno_p (i, PATTERN (place))
14200 				   && ! find_regno_fusage (place, USE, i))
14201 			    for (tem_insn = PREV_INSN (place); ;
14202 				 tem_insn = PREV_INSN (tem_insn))
14203 			      {
14204 				if (!NONDEBUG_INSN_P (tem_insn))
14205 				  {
14206 				    if (tem_insn == BB_HEAD (bb))
14207 				      break;
14208 				    continue;
14209 				  }
14210 				if (dead_or_set_p (tem_insn, piece)
14211 				    || reg_bitfield_target_p (piece,
14212 							      PATTERN (tem_insn)))
14213 				  {
14214 				    add_reg_note (tem_insn, REG_UNUSED, piece);
14215 				    break;
14216 				  }
14217 			      }
14218 			}
14219 
14220 		      place = 0;
14221 		    }
14222 		}
14223 	    }
14224 	  break;
14225 
14226 	default:
14227 	  /* Any other notes should not be present at this point in the
14228 	     compilation.  */
14229 	  gcc_unreachable ();
14230 	}
14231 
14232       if (place)
14233 	{
14234 	  XEXP (note, 1) = REG_NOTES (place);
14235 	  REG_NOTES (place) = note;
14236 	}
14237 
14238       if (place2)
14239 	add_shallow_copy_of_reg_note (place2, note);
14240     }
14241 }
14242 
14243 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14244    I3, I2, and I1 to new locations.  This is also called to add a link
14245    pointing at I3 when I3's destination is changed.  */
14246 
14247 static void
14248 distribute_links (struct insn_link *links)
14249 {
14250   struct insn_link *link, *next_link;
14251 
14252   for (link = links; link; link = next_link)
14253     {
14254       rtx_insn *place = 0;
14255       rtx_insn *insn;
14256       rtx set, reg;
14257 
14258       next_link = link->next;
14259 
14260       /* If the insn that this link points to is a NOTE, ignore it.  */
14261       if (NOTE_P (link->insn))
14262 	continue;
14263 
14264       set = 0;
14265       rtx pat = PATTERN (link->insn);
14266       if (GET_CODE (pat) == SET)
14267 	set = pat;
14268       else if (GET_CODE (pat) == PARALLEL)
14269 	{
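	  /* For a multi-set PARALLEL, find the SET whose destination is the
	     register recorded in this link.  */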
14270 	  int i;
14271 	  for (i = 0; i < XVECLEN (pat, 0); i++)
14272 	    {
14273 	      set = XVECEXP (pat, 0, i);
14274 	      if (GET_CODE (set) != SET)
14275 		continue;
14276 
14277 	      reg = SET_DEST (set);
14278 	      while (GET_CODE (reg) == ZERO_EXTRACT
14279 		     || GET_CODE (reg) == STRICT_LOW_PART
14280 		     || GET_CODE (reg) == SUBREG)
14281 		reg = XEXP (reg, 0);
14282 
14283 	      if (!REG_P (reg))
14284 		continue;
14285 
14286 	      if (REGNO (reg) == link->regno)
14287 		break;
14288 	    }
14289 	  if (i == XVECLEN (pat, 0))
14290 	    continue;
14291 	}
14292       else
14293 	continue;
14294 
14295       reg = SET_DEST (set);
14296 
14297       while (GET_CODE (reg) == ZERO_EXTRACT
14298 	     || GET_CODE (reg) == STRICT_LOW_PART
14299 	     || GET_CODE (reg) == SUBREG)
14300 	reg = XEXP (reg, 0);
14301 
14302       /* A LOG_LINK is defined as being placed on the first insn that uses
14303 	 a register and points to the insn that sets the register.  Start
14304 	 searching at the next insn after the target of the link and stop
14305 	 when we reach a set of the register or the end of the basic block.
14306 
14307 	 Note that this correctly handles the link that used to point from
14308 	 I3 to I2.  Also note that not much searching is typically done here
14309 	 since most links don't point very far away.  */
14310 
14311       for (insn = NEXT_INSN (link->insn);
14312 	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14313 		     || BB_HEAD (this_basic_block->next_bb) != insn));
14314 	   insn = NEXT_INSN (insn))
14315 	if (DEBUG_INSN_P (insn))
14316 	  continue;
14317 	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14318 	  {
14319 	    if (reg_referenced_p (reg, PATTERN (insn)))
14320 	      place = insn;
14321 	    break;
14322 	  }
14323 	else if (CALL_P (insn)
14324 		 && find_reg_fusage (insn, USE, reg))
14325 	  {
14326 	    place = insn;
14327 	    break;
14328 	  }
14329 	else if (INSN_P (insn) && reg_set_p (reg, insn))
14330 	  break;
14331 
14332       /* If we found a place to put the link, place it there unless there
14333 	 is already a link to the same insn as LINK at that point.  */
14334 
14335       if (place)
14336 	{
14337 	  struct insn_link *link2;
14338 
14339 	  FOR_EACH_LOG_LINK (link2, place)
14340 	    if (link2->insn == link->insn && link2->regno == link->regno)
14341 	      break;
14342 
14343 	  if (link2 == NULL)
14344 	    {
14345 	      link->next = LOG_LINKS (place);
14346 	      LOG_LINKS (place) = link;
14347 
14348 	      /* Set added_links_insn to the earliest insn we added a
14349 		 link to.  */
14350 	      if (added_links_insn == 0
14351 		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14352 		added_links_insn = place;
14353 	    }
14354 	}
14355     }
14356 }
14357 
14358 /* Check for any register or memory mentioned in EQUIV that is not
14359    mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
14360    of EXPR where some registers may have been replaced by constants.  */
14361 
14362 static bool
14363 unmentioned_reg_p (rtx equiv, rtx expr)
14364 {
14365   subrtx_iterator::array_type array;
14366   FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14367     {
14368       const_rtx x = *iter;
14369       if ((REG_P (x) || MEM_P (x))
14370 	  && !reg_mentioned_p (x, expr))
14371 	return true;
14372     }
14373   return false;
14374 }
14375 
14376 DEBUG_FUNCTION void
14377 dump_combine_stats (FILE *file)
14378 {
14379   fprintf
14380     (file,
14381      ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14382      combine_attempts, combine_merges, combine_extras, combine_successes);
14383 }
14384 
14385 void
14386 dump_combine_total_stats (FILE *file)
14387 {
14388   fprintf
14389     (file,
14390      "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14391      total_attempts, total_merges, total_extras, total_successes);
14392 }
14393 
14394 /* Try combining insns through substitution.  */
14395 static unsigned int
14396 rest_of_handle_combine (void)
14397 {
14398   int rebuild_jump_labels_after_combine;
14399 
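  /* Set up dataflow: run DCE as part of the LR problem, defer insn
     rescanning, and compute the REG_DEAD/REG_UNUSED notes that the
     combiner relies on.  */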
14400   df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14401   df_note_add_problem ();
14402   df_analyze ();
14403 
14404   regstat_init_n_sets_and_refs ();
14405   reg_n_sets_max = max_reg_num ();
14406 
14407   rebuild_jump_labels_after_combine
14408     = combine_instructions (get_insns (), max_reg_num ());
14409 
14410   /* Combining insns may have turned an indirect jump into a
14411      direct jump.  Rebuild the JUMP_LABEL fields of jumping
14412      instructions.  */
14413   if (rebuild_jump_labels_after_combine)
14414     {
14415       timevar_push (TV_JUMP);
14416       rebuild_jump_labels (get_insns ());
14417       cleanup_cfg (0);
14418       timevar_pop (TV_JUMP);
14419     }
14420 
14421   regstat_free_n_sets_and_refs ();
14422   return 0;
14423 }
14424 
14425 namespace {
14426 
14427 const pass_data pass_data_combine =
14428 {
14429   RTL_PASS, /* type */
14430   "combine", /* name */
14431   OPTGROUP_NONE, /* optinfo_flags */
14432   TV_COMBINE, /* tv_id */
14433   PROP_cfglayout, /* properties_required */
14434   0, /* properties_provided */
14435   0, /* properties_destroyed */
14436   0, /* todo_flags_start */
14437   TODO_df_finish, /* todo_flags_finish */
14438 };
14439 
14440 class pass_combine : public rtl_opt_pass
14441 {
14442 public:
14443   pass_combine (gcc::context *ctxt)
14444     : rtl_opt_pass (pass_data_combine, ctxt)
14445   {}
14446 
14447   /* opt_pass methods: */
14448   virtual bool gate (function *) { return (optimize > 0); }
14449   virtual unsigned int execute (function *)
14450     {
14451       return rest_of_handle_combine ();
14452     }
14453 
14454 }; // class pass_combine
14455 
14456 } // anon namespace
14457 
14458 rtl_opt_pass *
14459 make_pass_combine (gcc::context *ctxt)
14460 {
14461   return new pass_combine (ctxt);
14462 }
14463