xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/combine.c (revision 946379e7b37692fc43f68eb0d1c10daa0a7f3b6c)
1 /* Optimize by combining instructions for GNU compiler.
2    Copyright (C) 1987-2013 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21    Portable Optimizer, but redone to work on our list-structured
22    representation for RTL instead of their string representation.
23 
24    The LOG_LINKS of each insn identify the most recent assignment
25    to each REG used in the insn.  It is a list of previous insns,
26    each of which contains a SET for a REG that is used in this insn
27    and not used or set in between.  LOG_LINKs never cross basic blocks.
28    They are set up by create_log_links (below) before combining begins.
29 
30    We try to combine each pair of insns joined by a logical link.
31    We also try to combine triplets of insns A, B and C when C has
32    a link back to B and B has a link back to A.  Likewise for a
33    small number of quadruplets of insns A, B, C and D for which
34    there's high likelihood of success.
35 
36    LOG_LINKS has no entries for uses of CC0.  None are needed,
37    because the insn that sets CC0 is always immediately before
38    the insn that tests it.  So we always regard a branch insn as
39    having a logical link to the preceding insn.  The same is true
40    for an insn that explicitly uses CC0.
41 
42    We check (with use_crosses_set_p) to avoid combining in such a way
43    as to move a computation to a place where its value would be different.
44 
45    Combination is done by mathematically substituting the previous
46    insn(s) values for the regs they set into the expressions in
47    the later insns that refer to these regs.  If the result is a valid insn
48    for our target machine, according to the machine description,
49    we install it, delete the earlier insns, and update the data flow
50    information (LOG_LINKS and REG_NOTES) for what we did.
51 
52    There are a few exceptions where the dataflow information isn't
53    completely updated (however this is only a local issue since it is
54    regenerated before the next pass that uses it):
55 
56    - reg_live_length is not updated
57    - reg_n_refs is not adjusted in the rare case when a register is
58      no longer required in a computation
59    - there are extremely rare cases (see distribute_notes) when a
60      REG_DEAD note is lost
61    - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62      removed because there is no way to know which register it was
63      linking
64 
65    To simplify substitution, we combine only when the earlier insn(s)
66    consist of only a single assignment.  To simplify updating afterward,
67    we never combine when a subroutine call appears in the middle.
68 
69    Since we do not represent assignments to CC0 explicitly except when that
70    is all an insn does, there is no LOG_LINKS entry in an insn that uses
71    the condition code for the insn that set the condition code.
72    Fortunately, these two insns must be consecutive.
73    Therefore, every JUMP_INSN is taken to have an implicit logical link
74    to the preceding insn.  This is not quite right, since non-jumps can
75    also use the condition code; but in practice such insns would not
76    combine anyway.  */
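
/* A concrete (made-up) illustration of the scheme above: suppose the
   stream contains

	A: (set (reg 96) (plus (reg 95) (const_int 4)))
	B: (set (reg 97) (mult (reg 96) (reg 98)))

   and B has a LOG_LINK back to A.  Substituting A's SET_SRC for
   (reg 96) in B yields the candidate

	B': (set (reg 97) (mult (plus (reg 95) (const_int 4)) (reg 98)))

   If B' is recognized by the machine description (say, as a
   multiply-add), A is deleted and B is replaced by B'; otherwise the
   substitution is undone.  The register numbers are invented for the
   example.  */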
77 
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "tm.h"
82 #include "rtl.h"
83 #include "tree.h"
84 #include "tm_p.h"
85 #include "flags.h"
86 #include "regs.h"
87 #include "hard-reg-set.h"
88 #include "basic-block.h"
89 #include "insn-config.h"
90 #include "function.h"
91 /* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
92 #include "expr.h"
93 #include "insn-attr.h"
94 #include "recog.h"
95 #include "diagnostic-core.h"
96 #include "target.h"
97 #include "optabs.h"
98 #include "insn-codes.h"
99 #include "rtlhooks-def.h"
100 #include "params.h"
101 #include "tree-pass.h"
102 #include "df.h"
103 #include "valtrack.h"
104 #include "cgraph.h"
105 #include "obstack.h"
106 
107 /* Number of attempts to combine instructions in this function.  */
108 
109 static int combine_attempts;
110 
111 /* Number of attempts that got as far as substitution in this function.  */
112 
113 static int combine_merges;
114 
115 /* Number of instructions combined with added SETs in this function.  */
116 
117 static int combine_extras;
118 
119 /* Number of instructions combined in this function.  */
120 
121 static int combine_successes;
122 
123 /* Totals over entire compilation.  */
124 
125 static int total_attempts, total_merges, total_extras, total_successes;
126 
127 /* combine_instructions may try to replace the right hand side of the
128    second instruction with the value of an associated REG_EQUAL note
129    before throwing it at try_combine.  That is problematic when there
130    is a REG_DEAD note for a register used in the old right hand side
131    and can cause distribute_notes to do wrong things.  This is the
132    second instruction if it has been so modified, null otherwise.  */
133 
134 static rtx i2mod;
135 
136 /* When I2MOD is nonnull, this is a copy of the old right hand side.  */
137 
138 static rtx i2mod_old_rhs;
139 
140 /* When I2MOD is nonnull, this is a copy of the new right hand side.  */
141 
142 static rtx i2mod_new_rhs;
143 
144 typedef struct reg_stat_struct {
145   /* Record last point of death of (hard or pseudo) register n.  */
146   rtx				last_death;
147 
148   /* Record last point of modification of (hard or pseudo) register n.  */
149   rtx				last_set;
150 
151   /* The next group of fields allows the recording of the last value assigned
152      to (hard or pseudo) register n.  We use this information to see if an
153      operation being processed is redundant given a prior operation performed
154      on the register.  For example, an `and' with a constant is redundant if
155      all the zero bits are already known to be turned off.
156 
157      We use an approach similar to that used by cse, but change it in the
158      following ways:
159 
160      (1) We do not want to reinitialize at each label.
161      (2) It is useful, but not critical, to know the actual value assigned
162 	 to a register.  Often just its form is helpful.
163 
164      Therefore, we maintain the following fields:
165 
166      last_set_value		the last value assigned
167      last_set_label		records the value of label_tick when the
168 				register was assigned
169      last_set_table_tick	records the value of label_tick when a
170 				value using the register is assigned
171      last_set_invalid		set to nonzero when it is not valid
172 				to use the value of this register in some
173 				register's value
174 
175      To understand the usage of these tables, it is important to understand
176      the distinction between the value in last_set_value being valid and
177      the register being validly contained in some other expression in the
178      table.
179 
180      (The next two parameters are out of date).
181 
182      reg_stat[i].last_set_value is valid if it is nonzero, and either
183      reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
184 
185      Register I may validly appear in any expression returned for the value
186      of another register if reg_n_sets[i] is 1.  It may also appear in the
187      value for register J if reg_stat[j].last_set_invalid is zero, or
188      reg_stat[i].last_set_label < reg_stat[j].last_set_label.
189 
190      If an expression is found in the table containing a register which may
191      not validly appear in an expression, the register is replaced by
192      something that won't match, (clobber (const_int 0)).  */
193 
194   /* Record last value assigned to (hard or pseudo) register n.  */
195 
196   rtx				last_set_value;
197 
198   /* Record the value of label_tick when an expression involving register n
199      is placed in last_set_value.  */
200 
201   int				last_set_table_tick;
202 
203   /* Record the value of label_tick when the value for register n is placed in
204      last_set_value.  */
205 
206   int				last_set_label;
207 
208   /* These fields are maintained in parallel with last_set_value and are
209      used to store the mode in which the register was last set, the bits
210      that were known to be zero when it was last set, and the number of
211      sign bit copies it was known to have when it was last set.  */
212 
213   unsigned HOST_WIDE_INT	last_set_nonzero_bits;
214   char				last_set_sign_bit_copies;
215   ENUM_BITFIELD(machine_mode)	last_set_mode : 8;
216 
217   /* Set nonzero if references to register n in expressions should not be
218      used.  last_set_invalid is set nonzero when this register is being
219      assigned to and last_set_table_tick == label_tick.  */
220 
221   char				last_set_invalid;
222 
223   /* Some registers that are set more than once and used in more than one
224      basic block are nevertheless always set in similar ways.  For example,
225      a QImode register may be loaded from memory in two places on a machine
226      where byte loads zero extend.
227 
228      We record in the following fields if a register has some leading bits
229      that are always equal to the sign bit, and what we know about the
230      nonzero bits of a register, specifically which bits are known to be
231      zero.
232 
233      If an entry is zero, it means that we don't know anything special.  */
234 
235   unsigned char			sign_bit_copies;
236 
237   unsigned HOST_WIDE_INT	nonzero_bits;
238 
239   /* Record the value of the label_tick when the last truncation
240      happened.  The field truncated_to_mode is only valid if
241      truncation_label == label_tick.  */
242 
243   int				truncation_label;
244 
245   /* Record the last truncation seen for this register.  If truncation
246      is not a nop to this mode we might be able to save an explicit
247      truncation if we know that the value already contains a truncated
248      value.  */
249 
250   ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
251 } reg_stat_type;
252 
253 
254 static vec<reg_stat_type> reg_stat;
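
/* A minimal sketch (not part of the pass) of the validity rule quoted
   in the comment above, using the REG_N_SETS macro in place of the
   out-of-date reg_n_sets notation.  The authoritative logic lives in
   get_last_value and get_last_value_validate, below.  */
#if 0
static bool
last_set_value_usable_p (unsigned int regno)
{
  reg_stat_type *rsp = &reg_stat[regno];

  /* The recorded value must exist, and the register must either be
     set exactly once or have been set in the current basic block.  */
  return (rsp->last_set_value != 0
	  && (REG_N_SETS (regno) == 1
	      || rsp->last_set_label == label_tick));
}
#endif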
255 
256 /* Record the luid of the last insn that invalidated memory
257    (anything that writes memory, and subroutine calls, but not pushes).  */
258 
259 static int mem_last_set;
260 
261 /* Record the luid of the last CALL_INSN
262    so we can tell whether a potential combination crosses any calls.  */
263 
264 static int last_call_luid;
265 
266 /* When `subst' is called, this is the insn that is being modified
267    (by combining in a previous insn).  The PATTERN of this insn
268    is still the old pattern partially modified and it should not be
269    looked at, but this may be used to examine the successors of the insn
270    to judge whether a simplification is valid.  */
271 
272 static rtx subst_insn;
273 
274 /* This is the lowest LUID that `subst' is currently dealing with.
275    get_last_value will not return a value if the register was set at or
276    after this LUID.  If not for this mechanism, we could get confused if
277    I2 or I1 in try_combine were an insn that used the old value of a register
278    to obtain a new value.  In that case, we might erroneously get the
279    new value of the register when we wanted the old one.  */
280 
281 static int subst_low_luid;
282 
283 /* This contains any hard registers that are used in newpat; reg_dead_at_p
284    must consider all these registers to be always live.  */
285 
286 static HARD_REG_SET newpat_used_regs;
287 
288 /* This is an insn to which a LOG_LINKS entry has been added.  If this
289    insn is earlier than I2 or I3, combine should rescan starting at
290    that location.  */
291 
292 static rtx added_links_insn;
293 
294 /* Basic block in which we are performing combines.  */
295 static basic_block this_basic_block;
296 static bool optimize_this_for_speed_p;
297 
298 
299 /* Length of the currently allocated uid_insn_cost array.  */
300 
301 static int max_uid_known;
302 
303 /* The following array records the insn_rtx_cost for every insn
304    in the instruction stream.  */
305 
306 static int *uid_insn_cost;
307 
308 /* The following array records the LOG_LINKS for every insn in the
309    instruction stream as struct insn_link pointers.  */
310 
311 struct insn_link {
312   rtx insn;
313   struct insn_link *next;
314 };
315 
316 static struct insn_link **uid_log_links;
317 
318 #define INSN_COST(INSN)		(uid_insn_cost[INSN_UID (INSN)])
319 #define LOG_LINKS(INSN)		(uid_log_links[INSN_UID (INSN)])
320 
321 #define FOR_EACH_LOG_LINK(L, INSN)				\
322   for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
323 
324 /* Links for LOG_LINKS are allocated from this obstack.  */
325 
326 static struct obstack insn_link_obstack;
327 
328 /* Allocate a link.  */
329 
330 static inline struct insn_link *
331 alloc_insn_link (rtx insn, struct insn_link *next)
332 {
333   struct insn_link *l
334     = (struct insn_link *) obstack_alloc (&insn_link_obstack,
335 					  sizeof (struct insn_link));
336   l->insn = insn;
337   l->next = next;
338   return l;
339 }
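
/* A hedged usage sketch, not part of the pass: DEF_INSN and USE_INSN
   are hypothetical insns.  Prepend a link from DEF_INSN onto
   USE_INSN's LOG_LINKS, then walk the list the same way
   create_log_links and the combiner loops below do.  */
#if 0
static void
log_link_usage_sketch (rtx def_insn, rtx use_insn)
{
  struct insn_link *l;

  /* Record that USE_INSN consumes a value set by DEF_INSN.  */
  LOG_LINKS (use_insn) = alloc_insn_link (def_insn, LOG_LINKS (use_insn));

  /* Later, scan the links looking for that dependency.  */
  FOR_EACH_LOG_LINK (l, use_insn)
    if (l->insn == def_insn)
      break;	/* L is now the link added above.  */
}
#endif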
340 
341 /* Incremented for each basic block.  */
342 
343 static int label_tick;
344 
345 /* Reset to label_tick for each extended basic block in scanning order.  */
346 
347 static int label_tick_ebb_start;
348 
349 /* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
350    largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */
351 
352 static enum machine_mode nonzero_bits_mode;
353 
354 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
355    be safely used.  It is zero while computing them and after combine has
356    completed.  Keeping it zero while computing prevents propagating values based on
357    previously set values, which can be incorrect if a variable is modified
358    in a loop.  */
359 
360 static int nonzero_sign_valid;
361 
362 
363 /* Record one modification to rtl structure
364    to be undone by storing old_contents into *where.  */
365 
366 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
367 
368 struct undo
369 {
370   struct undo *next;
371   enum undo_kind kind;
372   union { rtx r; int i; enum machine_mode m; struct insn_link *l; } old_contents;
373   union { rtx *r; int *i; struct insn_link **l; } where;
374 };
375 
376 /* Record a bunch of changes to be undone.  UNDOS chains the undo
377    entries currently recorded; FREES chains entries free for reuse.
378 
379    other_insn is nonzero if we have modified some other insn in the process
380    of working on subst_insn.  It must be verified too.  */
381 
382 struct undobuf
383 {
384   struct undo *undos;
385   struct undo *frees;
386   rtx other_insn;
387 };
388 
389 static struct undobuf undobuf;
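
/* A simplified sketch of how the undo chain is unwound, assuming only
   the structures above; the real undo_all later in this file also
   recycles each entry onto undobuf.frees.  */
#if 0
static void
undo_all_sketch (void)
{
  struct undo *undo;

  for (undo = undobuf.undos; undo; undo = undo->next)
    switch (undo->kind)
      {
      case UNDO_RTX:
	*undo->where.r = undo->old_contents.r;
	break;
      case UNDO_INT:
	*undo->where.i = undo->old_contents.i;
	break;
      case UNDO_MODE:
	adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	break;
      case UNDO_LINKS:
	*undo->where.l = undo->old_contents.l;
	break;
      }
}
#endif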
390 
391 /* Number of times the pseudo being substituted for
392    was found and replaced.  */
393 
394 static int n_occurrences;
395 
396 static rtx reg_nonzero_bits_for_combine (const_rtx, enum machine_mode, const_rtx,
397 					 enum machine_mode,
398 					 unsigned HOST_WIDE_INT,
399 					 unsigned HOST_WIDE_INT *);
400 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, enum machine_mode, const_rtx,
401 						enum machine_mode,
402 						unsigned int, unsigned int *);
403 static void do_SUBST (rtx *, rtx);
404 static void do_SUBST_INT (int *, int);
405 static void init_reg_last (void);
406 static void setup_incoming_promotions (rtx);
407 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
408 static int cant_combine_insn_p (rtx);
409 static int can_combine_p (rtx, rtx, rtx, rtx, rtx, rtx, rtx *, rtx *);
410 static int combinable_i3pat (rtx, rtx *, rtx, rtx, rtx, int, int, rtx *);
411 static int contains_muldiv (rtx);
412 static rtx try_combine (rtx, rtx, rtx, rtx, int *, rtx);
413 static void undo_all (void);
414 static void undo_commit (void);
415 static rtx *find_split_point (rtx *, rtx, bool);
416 static rtx subst (rtx, rtx, rtx, int, int, int);
417 static rtx combine_simplify_rtx (rtx, enum machine_mode, int, int);
418 static rtx simplify_if_then_else (rtx);
419 static rtx simplify_set (rtx);
420 static rtx simplify_logical (rtx);
421 static rtx expand_compound_operation (rtx);
422 static const_rtx expand_field_assignment (const_rtx);
423 static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT,
424 			    rtx, unsigned HOST_WIDE_INT, int, int, int);
425 static rtx extract_left_shift (rtx, int);
426 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
427 			      unsigned HOST_WIDE_INT *);
428 static rtx canon_reg_for_combine (rtx, rtx);
429 static rtx force_to_mode (rtx, enum machine_mode,
430 			  unsigned HOST_WIDE_INT, int);
431 static rtx if_then_else_cond (rtx, rtx *, rtx *);
432 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
433 static int rtx_equal_for_field_assignment_p (rtx, rtx);
434 static rtx make_field_assignment (rtx);
435 static rtx apply_distributive_law (rtx);
436 static rtx distribute_and_simplify_rtx (rtx, int);
437 static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
438 				     unsigned HOST_WIDE_INT);
439 static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
440 				   unsigned HOST_WIDE_INT);
441 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
442 			    HOST_WIDE_INT, enum machine_mode, int *);
443 static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
444 static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
445 				 int);
446 static int recog_for_combine (rtx *, rtx, rtx *);
447 static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
448 static enum rtx_code simplify_compare_const (enum rtx_code, rtx, rtx *);
449 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
450 static void update_table_tick (rtx);
451 static void record_value_for_reg (rtx, rtx, rtx);
452 static void check_promoted_subreg (rtx, rtx);
453 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
454 static void record_dead_and_set_regs (rtx);
455 static int get_last_value_validate (rtx *, rtx, int, int);
456 static rtx get_last_value (const_rtx);
457 static int use_crosses_set_p (const_rtx, int);
458 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
459 static int reg_dead_at_p (rtx, rtx);
460 static void move_deaths (rtx, rtx, int, rtx, rtx *);
461 static int reg_bitfield_target_p (rtx, rtx);
462 static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx, rtx);
463 static void distribute_links (struct insn_link *);
464 static void mark_used_regs_combine (rtx);
465 static void record_promoted_value (rtx, rtx);
466 static int unmentioned_reg_p_1 (rtx *, void *);
467 static bool unmentioned_reg_p (rtx, rtx);
468 static int record_truncated_value (rtx *, void *);
469 static void record_truncated_values (rtx *, void *);
470 static bool reg_truncated_to_mode (enum machine_mode, const_rtx);
471 static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);
472 
473 
474 /* It is not safe to use ordinary gen_lowpart in combine.
475    See comments in gen_lowpart_for_combine.  */
476 #undef RTL_HOOKS_GEN_LOWPART
477 #define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine
478 
479 /* Our implementation of gen_lowpart never emits a new pseudo.  */
480 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
481 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine
482 
483 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
484 #define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine
485 
486 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
487 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine
488 
489 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
490 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode
491 
492 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
493 
494 
495 /* Convenience wrapper for the canonicalize_comparison target hook.
496    Target hooks cannot use enum rtx_code.  */
497 static inline void
498 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
499 				bool op0_preserve_value)
500 {
501   int code_int = (int)*code;
502   targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
503   *code = (enum rtx_code)code_int;
504 }
505 
506 /* Try to split PATTERN found in INSN.  This returns NULL_RTX if
507    PATTERN cannot be split.  Otherwise, it returns an insn sequence.
508    This is a wrapper around split_insns which ensures that the
509    reg_stat vector is made larger if the splitter creates a new
510    register.  */
511 
512 static rtx
513 combine_split_insns (rtx pattern, rtx insn)
514 {
515   rtx ret;
516   unsigned int nregs;
517 
518   ret = split_insns (pattern, insn);
519   nregs = max_reg_num ();
520   if (nregs > reg_stat.length ())
521     reg_stat.safe_grow_cleared (nregs);
522   return ret;
523 }
524 
525 /* This is used by find_single_use to locate an rtx in LOC that
526    contains exactly one use of DEST, which is typically either a REG
527    or CC0.  It returns a pointer to the innermost rtx expression
528    containing DEST.  Appearances of DEST that are being used to
529    totally replace it are not counted.  */
530 
531 static rtx *
532 find_single_use_1 (rtx dest, rtx *loc)
533 {
534   rtx x = *loc;
535   enum rtx_code code = GET_CODE (x);
536   rtx *result = NULL;
537   rtx *this_result;
538   int i;
539   const char *fmt;
540 
541   switch (code)
542     {
543     case CONST:
544     case LABEL_REF:
545     case SYMBOL_REF:
546     CASE_CONST_ANY:
547     case CLOBBER:
548       return 0;
549 
550     case SET:
551       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
552 	 of a REG that occupies all of the REG, the insn uses DEST if
553 	 it is mentioned in the destination or the source.  Otherwise, we
554 	 need only check the source.  */
555       if (GET_CODE (SET_DEST (x)) != CC0
556 	  && GET_CODE (SET_DEST (x)) != PC
557 	  && !REG_P (SET_DEST (x))
558 	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
559 		&& REG_P (SUBREG_REG (SET_DEST (x)))
560 		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
561 		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
562 		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
563 			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
564 	break;
565 
566       return find_single_use_1 (dest, &SET_SRC (x));
567 
568     case MEM:
569     case SUBREG:
570       return find_single_use_1 (dest, &XEXP (x, 0));
571 
572     default:
573       break;
574     }
575 
576   /* If it wasn't one of the common cases above, check each expression and
577      vector of this code.  Look for a unique usage of DEST.  */
578 
579   fmt = GET_RTX_FORMAT (code);
580   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
581     {
582       if (fmt[i] == 'e')
583 	{
584 	  if (dest == XEXP (x, i)
585 	      || (REG_P (dest) && REG_P (XEXP (x, i))
586 		  && REGNO (dest) == REGNO (XEXP (x, i))))
587 	    this_result = loc;
588 	  else
589 	    this_result = find_single_use_1 (dest, &XEXP (x, i));
590 
591 	  if (result == NULL)
592 	    result = this_result;
593 	  else if (this_result)
594 	    /* Duplicate usage.  */
595 	    return NULL;
596 	}
597       else if (fmt[i] == 'E')
598 	{
599 	  int j;
600 
601 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
602 	    {
603 	      if (XVECEXP (x, i, j) == dest
604 		  || (REG_P (dest)
605 		      && REG_P (XVECEXP (x, i, j))
606 		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
607 		this_result = loc;
608 	      else
609 		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
610 
611 	      if (result == NULL)
612 		result = this_result;
613 	      else if (this_result)
614 		return NULL;
615 	    }
616 	}
617     }
618 
619   return result;
620 }
621 
622 
623 /* See if DEST, produced in INSN, is used only a single time in the
624    sequel.  If so, return a pointer to the innermost rtx expression in which
625    it is used.
626 
627    If PLOC is nonzero, *PLOC is set to the insn containing the single use.
628 
629    If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
630    care about REG_DEAD notes or LOG_LINKS.
631 
632    Otherwise, we find the single use by finding an insn that has a
633    LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
634    only referenced once in that insn, we know that it must be the first
635    and last insn referencing DEST.  */
636 
637 static rtx *
638 find_single_use (rtx dest, rtx insn, rtx *ploc)
639 {
640   basic_block bb;
641   rtx next;
642   rtx *result;
643   struct insn_link *link;
644 
645 #ifdef HAVE_cc0
646   if (dest == cc0_rtx)
647     {
648       next = NEXT_INSN (insn);
649       if (next == 0
650 	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
651 	return 0;
652 
653       result = find_single_use_1 (dest, &PATTERN (next));
654       if (result && ploc)
655 	*ploc = next;
656       return result;
657     }
658 #endif
659 
660   if (!REG_P (dest))
661     return 0;
662 
663   bb = BLOCK_FOR_INSN (insn);
664   for (next = NEXT_INSN (insn);
665        next && BLOCK_FOR_INSN (next) == bb;
666        next = NEXT_INSN (next))
667     if (INSN_P (next) && dead_or_set_p (next, dest))
668       {
669 	FOR_EACH_LOG_LINK (link, next)
670 	  if (link->insn == insn)
671 	    break;
672 
673 	if (link)
674 	  {
675 	    result = find_single_use_1 (dest, &PATTERN (next));
676 	    if (ploc)
677 	      *ploc = next;
678 	    return result;
679 	  }
680       }
681 
682   return 0;
683 }
684 
685 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
686    insn.  The substitution can be undone by undo_all.  If INTO is already
687    set to NEWVAL, do not record this change.  Because computing NEWVAL might
688    also call SUBST, we have to compute it before we put anything into
689    the undo table.  */
690 
691 static void
692 do_SUBST (rtx *into, rtx newval)
693 {
694   struct undo *buf;
695   rtx oldval = *into;
696 
697   if (oldval == newval)
698     return;
699 
700   /* We'd like to catch as many invalid transformations here as
701      possible.  Unfortunately, there are way too many mode changes
702      that are perfectly valid, so we'd waste too much effort for
703      little gain doing the checks here.  Focus on catching invalid
704      transformations involving integer constants.  */
705   if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
706       && CONST_INT_P (newval))
707     {
708       /* Sanity check that we're replacing oldval with a CONST_INT
709 	 that is a valid sign-extension for the original mode.  */
710       gcc_assert (INTVAL (newval)
711 		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
712 
713       /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
714 	 CONST_INT is not valid, because after the replacement, the
715 	 original mode would be gone.  Unfortunately, we can't tell
716 	 when do_SUBST is called to replace the operand thereof, so we
717 	 perform this test on oldval instead, checking whether an
718 	 invalid replacement took place before we got here.  */
719       gcc_assert (!(GET_CODE (oldval) == SUBREG
720 		    && CONST_INT_P (SUBREG_REG (oldval))));
721       gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
722 		    && CONST_INT_P (XEXP (oldval, 0))));
723     }
724 
725   if (undobuf.frees)
726     buf = undobuf.frees, undobuf.frees = buf->next;
727   else
728     buf = XNEW (struct undo);
729 
730   buf->kind = UNDO_RTX;
731   buf->where.r = into;
732   buf->old_contents.r = oldval;
733   *into = newval;
734 
735   buf->next = undobuf.undos, undobuf.undos = buf;
736 }
737 
738 #define SUBST(INTO, NEWVAL)	do_SUBST(&(INTO), (NEWVAL))
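
/* Typical use, sketched with hypothetical PAT, NEW_SRC and OK
   variables: the replacement is recorded on undobuf.undos, so a
   failed combination can be rolled back wholesale.  */
#if 0
  SUBST (SET_SRC (pat), new_src);	/* Change recorded on undobuf.undos.  */
  if (!ok)				/* E.g. recog_for_combine failed.  */
    undo_all ();			/* SET_SRC (pat) reverts.  */
#endif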
739 
740 /* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
741    for a HOST_WIDE_INT value (including CONST_INT) is
742    not safe.  */
743 
744 static void
745 do_SUBST_INT (int *into, int newval)
746 {
747   struct undo *buf;
748   int oldval = *into;
749 
750   if (oldval == newval)
751     return;
752 
753   if (undobuf.frees)
754     buf = undobuf.frees, undobuf.frees = buf->next;
755   else
756     buf = XNEW (struct undo);
757 
758   buf->kind = UNDO_INT;
759   buf->where.i = into;
760   buf->old_contents.i = oldval;
761   *into = newval;
762 
763   buf->next = undobuf.undos, undobuf.undos = buf;
764 }
765 
766 #define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT(&(INTO), (NEWVAL))
767 
768 /* Similar to SUBST, but just substitute the mode.  This is used when
769    changing the mode of a pseudo-register, so that any other
770    references to the entry in the regno_reg_rtx array will change as
771    well.  */
772 
773 static void
774 do_SUBST_MODE (rtx *into, enum machine_mode newval)
775 {
776   struct undo *buf;
777   enum machine_mode oldval = GET_MODE (*into);
778 
779   if (oldval == newval)
780     return;
781 
782   if (undobuf.frees)
783     buf = undobuf.frees, undobuf.frees = buf->next;
784   else
785     buf = XNEW (struct undo);
786 
787   buf->kind = UNDO_MODE;
788   buf->where.r = into;
789   buf->old_contents.m = oldval;
790   adjust_reg_mode (*into, newval);
791 
792   buf->next = undobuf.undos, undobuf.undos = buf;
793 }
794 
795 #define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE(&(INTO), (NEWVAL))
796 
797 #ifndef HAVE_cc0
798 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */
799 
800 static void
801 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
802 {
803   struct undo *buf;
804   struct insn_link * oldval = *into;
805 
806   if (oldval == newval)
807     return;
808 
809   if (undobuf.frees)
810     buf = undobuf.frees, undobuf.frees = buf->next;
811   else
812     buf = XNEW (struct undo);
813 
814   buf->kind = UNDO_LINKS;
815   buf->where.l = into;
816   buf->old_contents.l = oldval;
817   *into = newval;
818 
819   buf->next = undobuf.undos, undobuf.undos = buf;
820 }
821 
822 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
823 #endif
824 
825 /* Subroutine of try_combine.  Determine whether the replacement patterns
826    NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
827    than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
828    that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
829    undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
830    of all the instructions can be estimated and the replacements are more
831    expensive than the original sequence.  */
832 
833 static bool
834 combine_validate_cost (rtx i0, rtx i1, rtx i2, rtx i3, rtx newpat,
835 		       rtx newi2pat, rtx newotherpat)
836 {
837   int i0_cost, i1_cost, i2_cost, i3_cost;
838   int new_i2_cost, new_i3_cost;
839   int old_cost, new_cost;
840 
841   /* Look up the original insn_rtx_costs.  */
842   i2_cost = INSN_COST (i2);
843   i3_cost = INSN_COST (i3);
844 
845   if (i1)
846     {
847       i1_cost = INSN_COST (i1);
848       if (i0)
849 	{
850 	  i0_cost = INSN_COST (i0);
851 	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
852 		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
853 	}
854       else
855 	{
856 	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
857 		      ? i1_cost + i2_cost + i3_cost : 0);
858 	  i0_cost = 0;
859 	}
860     }
861   else
862     {
863       old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
864       i1_cost = i0_cost = 0;
865     }
866 
867   /* Calculate the replacement insn_rtx_costs.  */
868   new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
869   if (newi2pat)
870     {
871       new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
872       new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
873 		 ? new_i2_cost + new_i3_cost : 0;
874     }
875   else
876     {
877       new_cost = new_i3_cost;
878       new_i2_cost = 0;
879     }
880 
881   if (undobuf.other_insn)
882     {
883       int old_other_cost, new_other_cost;
884 
885       old_other_cost = INSN_COST (undobuf.other_insn);
886       new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
887       if (old_other_cost > 0 && new_other_cost > 0)
888 	{
889 	  old_cost += old_other_cost;
890 	  new_cost += new_other_cost;
891 	}
892       else
893 	old_cost = 0;
894     }
895 
896   /* Disallow this combination if both new_cost and old_cost are greater than
897      zero, and new_cost is greater than old_cost.  */
898   if (old_cost > 0 && new_cost > old_cost)
899     {
900       if (dump_file)
901 	{
902 	  if (i0)
903 	    {
904 	      fprintf (dump_file,
905 		       "rejecting combination of insns %d, %d, %d and %d\n",
906 		       INSN_UID (i0), INSN_UID (i1), INSN_UID (i2),
907 		       INSN_UID (i3));
908 	      fprintf (dump_file, "original costs %d + %d + %d + %d = %d\n",
909 		       i0_cost, i1_cost, i2_cost, i3_cost, old_cost);
910 	    }
911 	  else if (i1)
912 	    {
913 	      fprintf (dump_file,
914 		       "rejecting combination of insns %d, %d and %d\n",
915 		       INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
916 	      fprintf (dump_file, "original costs %d + %d + %d = %d\n",
917 		       i1_cost, i2_cost, i3_cost, old_cost);
918 	    }
919 	  else
920 	    {
921 	      fprintf (dump_file,
922 		       "rejecting combination of insns %d and %d\n",
923 		       INSN_UID (i2), INSN_UID (i3));
924 	      fprintf (dump_file, "original costs %d + %d = %d\n",
925 		       i2_cost, i3_cost, old_cost);
926 	    }
927 
928 	  if (newi2pat)
929 	    {
930 	      fprintf (dump_file, "replacement costs %d + %d = %d\n",
931 		       new_i2_cost, new_i3_cost, new_cost);
932 	    }
933 	  else
934 	    fprintf (dump_file, "replacement cost %d\n", new_cost);
935 	}
936 
937       return false;
938     }
939 
940   /* Update the uid_insn_cost array with the replacement costs.  */
941   INSN_COST (i2) = new_i2_cost;
942   INSN_COST (i3) = new_i3_cost;
943   if (i1)
944     {
945       INSN_COST (i1) = 0;
946       if (i0)
947 	INSN_COST (i0) = 0;
948     }
949 
950   return true;
951 }
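
/* A worked example with made-up costs: combining I2 (cost 4) into I3
   (cost 8) gives old_cost = 12.  If the single replacement pattern
   NEWPAT costs 10, then new_cost = 10 <= 12 and the combination is
   allowed; at cost 16 it would be rejected.  An unknown (zero) cost
   on either side makes the comparison a no-op, so the combination is
   not rejected on cost grounds.  */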
952 
953 
954 /* Delete any insns that copy a register to itself.  */
955 
956 static void
957 delete_noop_moves (void)
958 {
959   rtx insn, next;
960   basic_block bb;
961 
962   FOR_EACH_BB (bb)
963     {
964       for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
965 	{
966 	  next = NEXT_INSN (insn);
967 	  if (INSN_P (insn) && noop_move_p (insn))
968 	    {
969 	      if (dump_file)
970 		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
971 
972 	      delete_insn_and_edges (insn);
973 	    }
974 	}
975     }
976 }
977 
978 
979 /* Fill in the LOG_LINKS field for all insns.  */
980 
981 static void
982 create_log_links (void)
983 {
984   basic_block bb;
985   rtx *next_use, insn;
986   df_ref *def_vec, *use_vec;
987 
988   next_use = XCNEWVEC (rtx, max_reg_num ());
989 
990   /* Pass through each block from the end, recording the uses of each
991      register and establishing log links when a def is encountered.
992      Note that we do not clear the next_use array, in order to save time,
993      so we have to test whether the use is in the same basic block as the def.
994 
995      There are a few cases below when we do not consider the definition or
996      usage -- these are taken from what the original flow.c did.  Don't ask me
997      why it is done this way; I don't know and if it works, I don't want to know.  */
998 
999   FOR_EACH_BB (bb)
1000     {
1001       FOR_BB_INSNS_REVERSE (bb, insn)
1002         {
1003           if (!NONDEBUG_INSN_P (insn))
1004             continue;
1005 
1006 	  /* Log links are created only once.  */
1007 	  gcc_assert (!LOG_LINKS (insn));
1008 
1009           for (def_vec = DF_INSN_DEFS (insn); *def_vec; def_vec++)
1010             {
1011 	      df_ref def = *def_vec;
1012               int regno = DF_REF_REGNO (def);
1013               rtx use_insn;
1014 
1015               if (!next_use[regno])
1016                 continue;
1017 
1018               /* Do not consider a def that is a pre/post modification in a MEM.  */
1019               if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1020                 continue;
1021 
1022               /* Do not make a log link for the frame pointer.  */
1023               if ((regno == FRAME_POINTER_REGNUM
1024                    && (! reload_completed || frame_pointer_needed))
1025 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
1026                   || (regno == HARD_FRAME_POINTER_REGNUM
1027                       && (! reload_completed || frame_pointer_needed))
1028 #endif
1029 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1030                   || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
1031 #endif
1032                   )
1033                 continue;
1034 
1035               use_insn = next_use[regno];
1036               if (BLOCK_FOR_INSN (use_insn) == bb)
1037                 {
1038                   /* flow.c claimed:
1039 
1040                      We don't build a LOG_LINK for hard registers contained
1041                      in ASM_OPERANDs.  If these registers get replaced,
1042                      we might wind up changing the semantics of the insn,
1043                      even if reload can make what appear to be valid
1044                      assignments later.  */
1045                   if (regno >= FIRST_PSEUDO_REGISTER
1046                       || asm_noperands (PATTERN (use_insn)) < 0)
1047 		    {
1048 		      /* Don't add duplicate links between instructions.  */
1049 		      struct insn_link *links;
1050 		      FOR_EACH_LOG_LINK (links, use_insn)
1051 		        if (insn == links->insn)
1052 			  break;
1053 
1054 		      if (!links)
1055 			LOG_LINKS (use_insn)
1056 			  = alloc_insn_link (insn, LOG_LINKS (use_insn));
1057 		    }
1058                 }
1059               next_use[regno] = NULL_RTX;
1060             }
1061 
1062           for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
1063             {
1064 	      df_ref use = *use_vec;
1065 	      int regno = DF_REF_REGNO (use);
1066 
1067               /* Do not consider the usage of the stack pointer
1068 		 by a function call.  */
1069               if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1070                 continue;
1071 
1072               next_use[regno] = insn;
1073             }
1074         }
1075     }
1076 
1077   free (next_use);
1078 }
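
/* For example (a made-up block), after the backward walk above

	I1: (set (reg 90) (mem ...))
	I2: (set (reg 91) (plus (reg 90) (const_int 1)))
	I3: ... (use (reg 91)) ...

   we end up with LOG_LINKS (I2) = { I1 } and LOG_LINKS (I3) = { I2 }:
   each use is linked to the most recent def of the register within
   the same basic block.  */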
1079 
1080 /* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
1081    true if we found a LOG_LINK that proves that A feeds B.  This only works
1082    if there are no instructions between A and B which could have a link
1083    depending on A, since in that case we would not record a link for B.
1084    We also check the implicit dependency created by a cc0 setter/user
1085    pair.  */
1086 
1087 static bool
1088 insn_a_feeds_b (rtx a, rtx b)
1089 {
1090   struct insn_link *links;
1091   FOR_EACH_LOG_LINK (links, b)
1092     if (links->insn == a)
1093       return true;
1094 #ifdef HAVE_cc0
1095   if (sets_cc0_p (a))
1096     return true;
1097 #endif
1098   return false;
1099 }
1100 
1101 /* Main entry point for the combiner.  F is the first insn of the function.
1102    NREGS is the first unused pseudo-reg number.
1103 
1104    Return nonzero if the combiner has turned an indirect jump
1105    instruction into a direct jump.  */
1106 static int
1107 combine_instructions (rtx f, unsigned int nregs)
1108 {
1109   rtx insn, next;
1110 #ifdef HAVE_cc0
1111   rtx prev;
1112 #endif
1113   struct insn_link *links, *nextlinks;
1114   rtx first;
1115   basic_block last_bb;
1116 
1117   int new_direct_jump_p = 0;
1118 
1119   for (first = f; first && !INSN_P (first); )
1120     first = NEXT_INSN (first);
1121   if (!first)
1122     return 0;
1123 
1124   combine_attempts = 0;
1125   combine_merges = 0;
1126   combine_extras = 0;
1127   combine_successes = 0;
1128 
1129   rtl_hooks = combine_rtl_hooks;
1130 
1131   reg_stat.safe_grow_cleared (nregs);
1132 
1133   init_recog_no_volatile ();
1134 
1135   /* Allocate arrays for insn info.  */
1136   max_uid_known = get_max_uid ();
1137   uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1138   uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1139   gcc_obstack_init (&insn_link_obstack);
1140 
1141   nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1142 
1143   /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
1144      problems when, for example, we have j <<= 1 in a loop.  */
1145 
1146   nonzero_sign_valid = 0;
1147   label_tick = label_tick_ebb_start = 1;
1148 
1149   /* Scan all SETs and see if we can deduce anything about what
1150      bits are known to be zero for some registers and how many copies
1151      of the sign bit are known to exist for those registers.
1152 
1153      Also set any known values so that we can use them while searching
1154      for what bits are known to be set.  */
1155 
1156   setup_incoming_promotions (first);
1157   /* Allow the entry block and the first block to fall into the same EBB.
1158      Conceptually the incoming promotions are assigned to the entry block.  */
1159   last_bb = ENTRY_BLOCK_PTR;
1160 
1161   create_log_links ();
1162   FOR_EACH_BB (this_basic_block)
1163     {
1164       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1165       last_call_luid = 0;
1166       mem_last_set = -1;
1167 
1168       label_tick++;
1169       if (!single_pred_p (this_basic_block)
1170 	  || single_pred (this_basic_block) != last_bb)
1171 	label_tick_ebb_start = label_tick;
1172       last_bb = this_basic_block;
1173 
1174       FOR_BB_INSNS (this_basic_block, insn)
1175         if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1176 	  {
1177 #ifdef AUTO_INC_DEC
1178             rtx links;
1179 #endif
1180 
1181             subst_low_luid = DF_INSN_LUID (insn);
1182             subst_insn = insn;
1183 
1184 	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1185 		         insn);
1186 	    record_dead_and_set_regs (insn);
1187 
1188 #ifdef AUTO_INC_DEC
1189 	    for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1190 	      if (REG_NOTE_KIND (links) == REG_INC)
1191 	        set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1192 						  insn);
1193 #endif
1194 
1195 	    /* Record the current insn_rtx_cost of this instruction.  */
1196 	    if (NONJUMP_INSN_P (insn))
1197 	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1198 	      					optimize_this_for_speed_p);
1199 	    if (dump_file)
1200 	      fprintf (dump_file, "insn_cost %d: %d\n",
1201 		    INSN_UID (insn), INSN_COST (insn));
1202 	  }
1203     }
1204 
1205   nonzero_sign_valid = 1;
1206 
1207   /* Now scan all the insns in forward order.  */
1208   label_tick = label_tick_ebb_start = 1;
1209   init_reg_last ();
1210   setup_incoming_promotions (first);
1211   last_bb = ENTRY_BLOCK_PTR;
1212 
1213   FOR_EACH_BB (this_basic_block)
1214     {
1215       rtx last_combined_insn = NULL_RTX;
1216       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1217       last_call_luid = 0;
1218       mem_last_set = -1;
1219 
1220       label_tick++;
1221       if (!single_pred_p (this_basic_block)
1222 	  || single_pred (this_basic_block) != last_bb)
1223 	label_tick_ebb_start = label_tick;
1224       last_bb = this_basic_block;
1225 
1226       rtl_profile_for_bb (this_basic_block);
1227       for (insn = BB_HEAD (this_basic_block);
1228 	   insn != NEXT_INSN (BB_END (this_basic_block));
1229 	   insn = next ? next : NEXT_INSN (insn))
1230 	{
1231 	  next = 0;
1232 	  if (NONDEBUG_INSN_P (insn))
1233 	    {
1234 	      while (last_combined_insn
1235 		     && INSN_DELETED_P (last_combined_insn))
1236 		last_combined_insn = PREV_INSN (last_combined_insn);
1237 	      if (last_combined_insn == NULL_RTX
1238 		  || BARRIER_P (last_combined_insn)
1239 		  || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1240 		  || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1241 		last_combined_insn = insn;
1242 
1243 	      /* See if we know about function return values before this
1244 		 insn based upon SUBREG flags.  */
1245 	      check_promoted_subreg (insn, PATTERN (insn));
1246 
1247 	      /* See if we can find hardregs and subregs of pseudos in
1248 		 narrower modes.  This could help turn TRUNCATEs
1249 		 into SUBREGs.  */
1250 	      note_uses (&PATTERN (insn), record_truncated_values, NULL);
1251 
1252 	      /* Try this insn with each insn it links back to.  */
1253 
1254 	      FOR_EACH_LOG_LINK (links, insn)
1255 		if ((next = try_combine (insn, links->insn, NULL_RTX,
1256 					 NULL_RTX, &new_direct_jump_p,
1257 					 last_combined_insn)) != 0)
1258 		  goto retry;
1259 
1260 	      /* Try each sequence of three linked insns ending with this one.  */
1261 
1262 	      FOR_EACH_LOG_LINK (links, insn)
1263 		{
1264 		  rtx link = links->insn;
1265 
1266 		  /* If the linked insn has been replaced by a note, then there
1267 		     is no point in pursuing this chain any further.  */
1268 		  if (NOTE_P (link))
1269 		    continue;
1270 
1271 		  FOR_EACH_LOG_LINK (nextlinks, link)
1272 		    if ((next = try_combine (insn, link, nextlinks->insn,
1273 					     NULL_RTX, &new_direct_jump_p,
1274 					     last_combined_insn)) != 0)
1275 		      goto retry;
1276 		}
1277 
1278 #ifdef HAVE_cc0
1279 	      /* Try to combine a jump insn that uses CC0
1280 		 with a preceding insn that sets CC0, and maybe with its
1281 		 logical predecessor as well.
1282 		 This is how we make decrement-and-branch insns.
1283 		 We need this special code because data flow connections
1284 		 via CC0 do not get entered in LOG_LINKS.  */
1285 
1286 	      if (JUMP_P (insn)
1287 		  && (prev = prev_nonnote_insn (insn)) != 0
1288 		  && NONJUMP_INSN_P (prev)
1289 		  && sets_cc0_p (PATTERN (prev)))
1290 		{
1291 		  if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1292 					   &new_direct_jump_p,
1293 					   last_combined_insn)) != 0)
1294 		    goto retry;
1295 
1296 		  FOR_EACH_LOG_LINK (nextlinks, prev)
1297 		    if ((next = try_combine (insn, prev, nextlinks->insn,
1298 					     NULL_RTX, &new_direct_jump_p,
1299 					     last_combined_insn)) != 0)
1300 		      goto retry;
1301 		}
1302 
1303 	      /* Do the same for an insn that explicitly references CC0.  */
1304 	      if (NONJUMP_INSN_P (insn)
1305 		  && (prev = prev_nonnote_insn (insn)) != 0
1306 		  && NONJUMP_INSN_P (prev)
1307 		  && sets_cc0_p (PATTERN (prev))
1308 		  && GET_CODE (PATTERN (insn)) == SET
1309 		  && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1310 		{
1311 		  if ((next = try_combine (insn, prev, NULL_RTX, NULL_RTX,
1312 					   &new_direct_jump_p,
1313 					   last_combined_insn)) != 0)
1314 		    goto retry;
1315 
1316 		  FOR_EACH_LOG_LINK (nextlinks, prev)
1317 		    if ((next = try_combine (insn, prev, nextlinks->insn,
1318 					     NULL_RTX, &new_direct_jump_p,
1319 					     last_combined_insn)) != 0)
1320 		      goto retry;
1321 		}
1322 
1323 	      /* Finally, see if any of the insns that this insn links to
1324 		 explicitly references CC0.  If so, try this insn, that insn,
1325 		 and its predecessor if it sets CC0.  */
1326 	      FOR_EACH_LOG_LINK (links, insn)
1327 		if (NONJUMP_INSN_P (links->insn)
1328 		    && GET_CODE (PATTERN (links->insn)) == SET
1329 		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1330 		    && (prev = prev_nonnote_insn (links->insn)) != 0
1331 		    && NONJUMP_INSN_P (prev)
1332 		    && sets_cc0_p (PATTERN (prev))
1333 		    && (next = try_combine (insn, links->insn,
1334 					    prev, NULL_RTX, &new_direct_jump_p,
1335 					    last_combined_insn)) != 0)
1336 		  goto retry;
1337 #endif
1338 
1339 	      /* Try combining an insn with two different insns whose results it
1340 		 uses.  */
1341 	      FOR_EACH_LOG_LINK (links, insn)
1342 		for (nextlinks = links->next; nextlinks;
1343 		     nextlinks = nextlinks->next)
1344 		  if ((next = try_combine (insn, links->insn,
1345 					   nextlinks->insn, NULL_RTX,
1346 					   &new_direct_jump_p,
1347 					   last_combined_insn)) != 0)
1348 		    goto retry;
1349 
1350 	      /* Try four-instruction combinations.  */
1351 	      FOR_EACH_LOG_LINK (links, insn)
1352 		{
1353 		  struct insn_link *next1;
1354 		  rtx link = links->insn;
1355 
1356 		  /* If the linked insn has been replaced by a note, then there
1357 		     is no point in pursuing this chain any further.  */
1358 		  if (NOTE_P (link))
1359 		    continue;
1360 
1361 		  FOR_EACH_LOG_LINK (next1, link)
1362 		    {
1363 		      rtx link1 = next1->insn;
1364 		      if (NOTE_P (link1))
1365 			continue;
1366 		      /* I0 -> I1 -> I2 -> I3.  */
1367 		      FOR_EACH_LOG_LINK (nextlinks, link1)
1368 			if ((next = try_combine (insn, link, link1,
1369 						 nextlinks->insn,
1370 						 &new_direct_jump_p,
1371 						 last_combined_insn)) != 0)
1372 			  goto retry;
1373 		      /* I0, I1 -> I2, I2 -> I3.  */
1374 		      for (nextlinks = next1->next; nextlinks;
1375 			   nextlinks = nextlinks->next)
1376 			if ((next = try_combine (insn, link, link1,
1377 						 nextlinks->insn,
1378 						 &new_direct_jump_p,
1379 						 last_combined_insn)) != 0)
1380 			  goto retry;
1381 		    }
1382 
1383 		  for (next1 = links->next; next1; next1 = next1->next)
1384 		    {
1385 		      rtx link1 = next1->insn;
1386 		      if (NOTE_P (link1))
1387 			continue;
1388 		      /* I0 -> I2; I1, I2 -> I3.  */
1389 		      FOR_EACH_LOG_LINK (nextlinks, link)
1390 			if ((next = try_combine (insn, link, link1,
1391 						 nextlinks->insn,
1392 						 &new_direct_jump_p,
1393 						 last_combined_insn)) != 0)
1394 			  goto retry;
1395 		      /* I0 -> I1; I1, I2 -> I3.  */
1396 		      FOR_EACH_LOG_LINK (nextlinks, link1)
1397 			if ((next = try_combine (insn, link, link1,
1398 						 nextlinks->insn,
1399 						 &new_direct_jump_p,
1400 						 last_combined_insn)) != 0)
1401 			  goto retry;
1402 		    }
1403 		}
1404 
1405 	      /* Try this insn with each REG_EQUAL note it links back to.  */
1406 	      FOR_EACH_LOG_LINK (links, insn)
1407 		{
1408 		  rtx set, note;
1409 		  rtx temp = links->insn;
1410 		  if ((set = single_set (temp)) != 0
1411 		      && (note = find_reg_equal_equiv_note (temp)) != 0
1412 		      && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1413 		      /* Avoid using a register that may already have been marked
1414 			 dead by an earlier instruction.  */
1415 		      && ! unmentioned_reg_p (note, SET_SRC (set))
1416 		      && (GET_MODE (note) == VOIDmode
1417 			  ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1418 			  : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1419 		    {
1420 		      /* Temporarily replace the set's source with the
1421 			 contents of the REG_EQUAL note.  The insn will
1422 			 be deleted or recognized by try_combine.  */
1423 		      rtx orig = SET_SRC (set);
1424 		      SET_SRC (set) = note;
1425 		      i2mod = temp;
1426 		      i2mod_old_rhs = copy_rtx (orig);
1427 		      i2mod_new_rhs = copy_rtx (note);
1428 		      next = try_combine (insn, i2mod, NULL_RTX, NULL_RTX,
1429 					  &new_direct_jump_p,
1430 					  last_combined_insn);
1431 		      i2mod = NULL_RTX;
1432 		      if (next)
1433 			goto retry;
1434 		      SET_SRC (set) = orig;
1435 		    }
1436 		}
1437 
1438 	      if (!NOTE_P (insn))
1439 		record_dead_and_set_regs (insn);
1440 
1441 	    retry:
1442 	      ;
1443 	    }
1444 	}
1445     }
1446 
1447   default_rtl_profile ();
1448   clear_bb_flags ();
1449   new_direct_jump_p |= purge_all_dead_edges ();
1450   delete_noop_moves ();
1451 
1452   /* Clean up.  */
1453   obstack_free (&insn_link_obstack, NULL);
1454   free (uid_log_links);
1455   free (uid_insn_cost);
1456   reg_stat.release ();
1457 
1458   {
1459     struct undo *undo, *next;
1460     for (undo = undobuf.frees; undo; undo = next)
1461       {
1462 	next = undo->next;
1463 	free (undo);
1464       }
1465     undobuf.frees = 0;
1466   }
1467 
1468   total_attempts += combine_attempts;
1469   total_merges += combine_merges;
1470   total_extras += combine_extras;
1471   total_successes += combine_successes;
1472 
1473   nonzero_sign_valid = 0;
1474   rtl_hooks = general_rtl_hooks;
1475 
1476   /* Make recognizer allow volatile MEMs again.  */
1477   init_recog ();
1478 
1479   return new_direct_jump_p;
1480 }
1481 
1482 /* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */
1483 
1484 static void
1485 init_reg_last (void)
1486 {
1487   unsigned int i;
1488   reg_stat_type *p;
1489 
1490   FOR_EACH_VEC_ELT (reg_stat, i, p)
1491     memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1492 }
1493 
1494 /* Set up any promoted values for incoming argument registers.  */
1495 
1496 static void
1497 setup_incoming_promotions (rtx first)
1498 {
1499   tree arg;
1500   bool strictly_local = false;
1501 
1502   for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1503        arg = DECL_CHAIN (arg))
1504     {
1505       rtx x, reg = DECL_INCOMING_RTL (arg);
1506       int uns1, uns3;
1507       enum machine_mode mode1, mode2, mode3, mode4;
1508 
1509       /* Only continue if the incoming argument is in a register.  */
1510       if (!REG_P (reg))
1511 	continue;
1512 
1513       /* Determine, if possible, whether all call sites of the current
1514          function lie within the current compilation unit.  (This does
1515 	 take into account the exporting of a function via taking its
1516 	 address, and so forth.)  */
1517       strictly_local = cgraph_local_info (current_function_decl)->local;
1518 
1519       /* The mode and signedness of the argument before any promotions happen
1520          (equal to the mode of the pseudo holding it at that stage).  */
1521       mode1 = TYPE_MODE (TREE_TYPE (arg));
1522       uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1523 
1524       /* The mode and signedness of the argument after any source language and
1525          TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
1526       mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1527       uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1528 
1529       /* The mode and signedness of the argument as it is actually passed,
1530          see assign_parm_setup_reg in function.c.  */
1531       mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1532 				     TREE_TYPE (cfun->decl), 0);
1533 
1534       /* The mode of the register in which the argument is being passed.  */
1535       mode4 = GET_MODE (reg);
1536 
1537       /* Eliminate sign extensions in the callee when:
1538 	 (a) A mode promotion has occurred;  */
1539       if (mode1 == mode3)
1540 	continue;
1541       /* (b) The mode of the register is the same as the mode of
1542 	     the argument as it is passed; */
1543       if (mode3 != mode4)
1544 	continue;
1545       /* (c) There's no language level extension;  */
1546       if (mode1 == mode2)
1547 	;
1548       /* (c.1) All callers are from the current compilation unit.  If that's
1549 	 the case we don't have to rely on an ABI, we only have to know
1550 	 what we're generating right now, and we know that we will do the
1551 	 mode1 to mode2 promotion with the given sign.  */
1552       else if (!strictly_local)
1553 	continue;
1554       /* (c.2) The combination of the two promotions is useful.  This is
1555 	 true when the signs match, or if the first promotion is unsigned.
1556 	 In the latter case, (sign_extend (zero_extend x)) is the same as
1557 	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
1558       else if (uns1)
1559 	uns3 = true;
1560       else if (uns3)
1561 	continue;
1562 
1563       /* Record that the value was promoted from mode1 to mode3,
1564 	 so that any sign extension at the head of the current
1565 	 function may be eliminated.  */
1566       x = gen_rtx_CLOBBER (mode1, const0_rtx);
1567       x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1568       record_value_for_reg (reg, first, x);
1569     }
1570 }
1571 
1572 /* Called via note_stores.  If X is a pseudo that is narrower than
1573    HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1574 
1575    If we are setting only a portion of X and we can't figure out what
1576    portion, assume all bits will be used since we don't know what will
1577    be happening.
1578 
1579    Similarly, set how many bits of X are known to be copies of the sign bit
1580    at all locations in the function.  This is the smallest number implied
1581    by any set of X.  */
1582 
1583 static void
1584 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1585 {
1586   rtx insn = (rtx) data;
1587   unsigned int num;
1588 
1589   if (REG_P (x)
1590       && REGNO (x) >= FIRST_PSEUDO_REGISTER
1591       /* If this register is undefined at the start of the function, we can't
1592 	 say what its contents were.  */
1593       && ! REGNO_REG_SET_P
1594            (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
1595       && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1596     {
1597       reg_stat_type *rsp = &reg_stat[REGNO (x)];
1598 
1599       if (set == 0 || GET_CODE (set) == CLOBBER)
1600 	{
1601 	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1602 	  rsp->sign_bit_copies = 1;
1603 	  return;
1604 	}
1605 
1606       /* If this register is being initialized using itself, and the
1607 	 register is uninitialized in this basic block, and there are
1608 	 no LOG_LINKS which set the register, then part of the
1609 	 register is uninitialized.  In that case we can't assume
1610 	 anything about the number of nonzero bits.
1611 
1612 	 ??? We could do better if we checked this in
1613 	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
1614 	 could avoid making assumptions about the insn which initially
1615 	 sets the register, while still using the information in other
1616 	 insns.  We would have to be careful to check every insn
1617 	 involved in the combination.  */
1618 
1619       if (insn
1620 	  && reg_referenced_p (x, PATTERN (insn))
1621 	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1622 			       REGNO (x)))
1623 	{
1624 	  struct insn_link *link;
1625 
1626 	  FOR_EACH_LOG_LINK (link, insn)
1627 	    if (dead_or_set_p (link->insn, x))
1628 	      break;
1629 	  if (!link)
1630 	    {
1631 	      rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1632 	      rsp->sign_bit_copies = 1;
1633 	      return;
1634 	    }
1635 	}
1636 
1637       /* If this is a complex assignment, see if we can convert it into a
1638 	 simple assignment.  */
1639       set = expand_field_assignment (set);
1640 
1641       /* If this is a simple assignment, or we have a paradoxical SUBREG,
1642 	 set what we know about X.  */
1643 
1644       if (SET_DEST (set) == x
1645 	  || (paradoxical_subreg_p (SET_DEST (set))
1646 	      && SUBREG_REG (SET_DEST (set)) == x))
1647 	{
1648 	  rtx src = SET_SRC (set);
1649 
1650 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
1651 	  /* If X is narrower than a word and SRC is a non-negative
1652 	     constant that would appear negative in the mode of X,
1653 	     sign-extend it for use in reg_stat[].nonzero_bits because some
1654 	     machines (maybe most) will actually do the sign-extension
1655 	     and this is the conservative approach.
1656 
1657 	     ??? For 2.5, try to tighten up the MD files in this regard
1658 	     instead of this kludge.  */
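	  /* For example, in QImode the non-negative constant 255 has
	     its sign bit set, so it is treated below as
	     (const_int -1), i.e. 255 | ~0xff.  */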
1659 
1660 	  if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
1661 	      && CONST_INT_P (src)
1662 	      && INTVAL (src) > 0
1663 	      && val_signbit_known_set_p (GET_MODE (x), INTVAL (src)))
1664 	    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (GET_MODE (x)));
1665 #endif
1666 
1667 	  /* Don't call nonzero_bits if it cannot change anything.  */
1668 	  if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1669 	    rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
1670 	  num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1671 	  if (rsp->sign_bit_copies == 0
1672 	      || rsp->sign_bit_copies > num)
1673 	    rsp->sign_bit_copies = num;
1674 	}
1675       else
1676 	{
1677 	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1678 	  rsp->sign_bit_copies = 1;
1679 	}
1680     }
1681 }
1682 
1683 /* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
1684    optional insns that were previously combined into I3 or that will be
1685    combined into the merger of INSN and I3.  The order is PRED, PRED2,
1686    INSN, SUCC, SUCC2, I3.
1687 
1688    Return 0 if the combination is not allowed for any reason.
1689 
1690    If the combination is allowed, *PDEST will be set to the single
1691    destination of INSN and *PSRC to the single source, and this function
1692    will return 1.  */
1693 
1694 static int
1695 can_combine_p (rtx insn, rtx i3, rtx pred ATTRIBUTE_UNUSED,
1696 	       rtx pred2 ATTRIBUTE_UNUSED, rtx succ, rtx succ2,
1697 	       rtx *pdest, rtx *psrc)
1698 {
1699   int i;
1700   const_rtx set = 0;
1701   rtx src, dest;
1702   rtx p;
1703 #ifdef AUTO_INC_DEC
1704   rtx link;
1705 #endif
1706   bool all_adjacent = true;
1707   int (*is_volatile_p) (const_rtx);
1708 
1709   if (succ)
1710     {
1711       if (succ2)
1712 	{
1713 	  if (next_active_insn (succ2) != i3)
1714 	    all_adjacent = false;
1715 	  if (next_active_insn (succ) != succ2)
1716 	    all_adjacent = false;
1717 	}
1718       else if (next_active_insn (succ) != i3)
1719 	all_adjacent = false;
1720       if (next_active_insn (insn) != succ)
1721 	all_adjacent = false;
1722     }
1723   else if (next_active_insn (insn) != i3)
1724     all_adjacent = false;
1725 
1726   /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1727      or a PARALLEL consisting of such a SET and CLOBBERs.
1728 
1729      If INSN has CLOBBER parallel parts, ignore them for our processing.
1730      By definition, these happen during the execution of the insn.  When it
1731      is merged with another insn, all bets are off.  If they are, in fact,
1732      needed and aren't also supplied in I3, they may be added by
1733      recog_for_combine.  Otherwise, it won't match.
1734 
1735      We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1736      note.
1737 
1738      Get the source and destination of INSN.  If more than one, can't
1739      combine.  */
1740 
1741   if (GET_CODE (PATTERN (insn)) == SET)
1742     set = PATTERN (insn);
1743   else if (GET_CODE (PATTERN (insn)) == PARALLEL
1744 	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1745     {
1746       for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1747 	{
1748 	  rtx elt = XVECEXP (PATTERN (insn), 0, i);
1749 
1750 	  switch (GET_CODE (elt))
1751 	    {
1752 	    /* This is important to combine floating point insns
1753 	       for the SH4 port.  */
1754 	    case USE:
1755 	      /* Combining an isolated USE doesn't make sense.
1756 		 We depend here on combinable_i3pat to reject them.  */
1757 	      /* The code below this loop only verifies that the inputs of
1758 		 the SET in INSN do not change.  We call reg_set_between_p
1759 		 to verify that the REG in the USE does not change between
1760 		 I3 and INSN.
1761 		 If the USE in INSN was for a pseudo register, the matching
1762 		 insn pattern will likely match any register; combining this
1763 		 with any other USE would only be safe if we knew that the
1764 		 used registers have identical values, or if there was
1765 		 something to tell them apart, e.g. different modes.  For
1766 		 now, we forgo such complicated tests and simply disallow
1767 		 combining of USES of pseudo registers with any other USE.  */
1768 	      if (REG_P (XEXP (elt, 0))
1769 		  && GET_CODE (PATTERN (i3)) == PARALLEL)
1770 		{
1771 		  rtx i3pat = PATTERN (i3);
1772 		  int i = XVECLEN (i3pat, 0) - 1;
1773 		  unsigned int regno = REGNO (XEXP (elt, 0));
1774 
1775 		  do
1776 		    {
1777 		      rtx i3elt = XVECEXP (i3pat, 0, i);
1778 
1779 		      if (GET_CODE (i3elt) == USE
1780 			  && REG_P (XEXP (i3elt, 0))
1781 			  && (REGNO (XEXP (i3elt, 0)) == regno
1782 			      ? reg_set_between_p (XEXP (elt, 0),
1783 						   PREV_INSN (insn), i3)
1784 			      : regno >= FIRST_PSEUDO_REGISTER))
1785 			return 0;
1786 		    }
1787 		  while (--i >= 0);
1788 		}
1789 	      break;
1790 
1791 	      /* We can ignore CLOBBERs.  */
1792 	    case CLOBBER:
1793 	      break;
1794 
1795 	    case SET:
1796 	      /* Ignore SETs whose result isn't used but not those that
1797 		 have side-effects.  */
1798 	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1799 		  && insn_nothrow_p (insn)
1800 		  && !side_effects_p (elt))
1801 		break;
1802 
1803 	      /* If we have already found a SET, this is a second one and
1804 		 so we cannot combine with this insn.  */
1805 	      if (set)
1806 		return 0;
1807 
1808 	      set = elt;
1809 	      break;
1810 
1811 	    default:
1812 	      /* Anything else means we can't combine.  */
1813 	      return 0;
1814 	    }
1815 	}
1816 
1817       if (set == 0
1818 	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1819 	     so don't do anything with it.  */
1820 	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1821 	return 0;
1822     }
1823   else
1824     return 0;
1825 
1826   if (set == 0)
1827     return 0;
1828 
1829   /* The simplification in expand_field_assignment may call back to
1830      get_last_value, so set a safeguard here.  */
1831   subst_low_luid = DF_INSN_LUID (insn);
1832 
1833   set = expand_field_assignment (set);
1834   src = SET_SRC (set), dest = SET_DEST (set);
1835 
1836   /* Don't eliminate a store in the stack pointer.  */
1837   if (dest == stack_pointer_rtx
1838       /* Don't combine with an insn that sets a register to itself if it has
1839 	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
1840       || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1841       /* Can't merge an ASM_OPERANDS.  */
1842       || GET_CODE (src) == ASM_OPERANDS
1843       /* Can't merge a function call.  */
1844       || GET_CODE (src) == CALL
1845       /* Don't eliminate a function call argument.  */
1846       || (CALL_P (i3)
1847 	  && (find_reg_fusage (i3, USE, dest)
1848 	      || (REG_P (dest)
1849 		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
1850 		  && global_regs[REGNO (dest)])))
1851       /* Don't substitute into an incremented register.  */
1852       || FIND_REG_INC_NOTE (i3, dest)
1853       || (succ && FIND_REG_INC_NOTE (succ, dest))
1854       || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1855       /* Don't substitute into a non-local goto, this confuses CFG.  */
1856       || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1857       /* Make sure that DEST is not used after SUCC but before I3.  */
1858       || (!all_adjacent
1859 	  && ((succ2
1860 	       && (reg_used_between_p (dest, succ2, i3)
1861 		   || reg_used_between_p (dest, succ, succ2)))
1862 	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1863       /* Make sure that the value that is to be substituted for the register
1864 	 does not use any registers whose values alter in between.  However,
1865 	 if the insns are adjacent, a use can't cross a set even though we
1866 	 think it might (this can happen for a sequence of insns each setting
1867 	 the same destination; last_set of that register might point to
1868 	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
1869 	 equivalent to the memory so the substitution is valid even if there
1870 	 are intervening stores.  Also, don't move a volatile asm or
1871 	 UNSPEC_VOLATILE across any other insns.  */
1872       || (! all_adjacent
1873 	  && (((!MEM_P (src)
1874 		|| ! find_reg_note (insn, REG_EQUIV, src))
1875 	       && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1876 	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1877 	      || GET_CODE (src) == UNSPEC_VOLATILE))
1878       /* Don't combine across a CALL_INSN, because that would possibly
1879 	 change whether the life span of some REGs crosses calls or not,
1880 	 and it is a pain to update that information.
1881 	 Exception: if source is a constant, moving it later can't hurt.
1882 	 Accept that as a special case.  */
1883       || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1884     return 0;
1885 
1886   /* DEST must either be a REG or CC0.  */
1887   if (REG_P (dest))
1888     {
1889       /* If register alignment is being enforced for multi-word items in all
1890 	 cases except for parameters, it is possible to have a register copy
1891 	 insn referencing a hard register that is not allowed to contain the
1892 	 mode being copied and which would not be valid as an operand of most
1893 	 insns.  Eliminate this problem by not combining with such an insn.
1894 
1895 	 Also, on some machines we don't want to extend the life of a hard
1896 	 register.  */
1897 
1898       if (REG_P (src)
1899 	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1900 	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1901 	      /* Don't extend the life of a hard register unless it is
1902 		 a user variable (if we have few registers) or it can't
1903 		 fit into the desired register (meaning something special
1904 		 is going on).
1905 		 Also avoid substituting a return register into I3, because
1906 		 reload can't handle a conflict with constraints of other
1907 		 inputs.  */
1908 	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
1909 		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
1910 	return 0;
1911     }
1912   else if (GET_CODE (dest) != CC0)
1913     return 0;
1914 
1915 
1916   if (GET_CODE (PATTERN (i3)) == PARALLEL)
1917     for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
1918       if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
1919 	{
1920 	  /* Don't substitute for a register intended as a clobberable
1921 	     operand.  */
1922 	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
1923 	  if (rtx_equal_p (reg, dest))
1924 	    return 0;
1925 
1926 	  /* If the clobber represents an earlyclobber operand, we must not
1927 	     substitute an expression containing the clobbered register.
1928 	     As we do not analyze the constraint strings here, we have to
1929 	     make the conservative assumption.  However, if the register is
1930 	     a fixed hard reg, the clobber cannot represent any operand;
1931 	     we leave it up to the machine description to either accept or
1932 	     reject use-and-clobber patterns.  */
1933 	  if (!REG_P (reg)
1934 	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
1935 	      || !fixed_regs[REGNO (reg)])
1936 	    if (reg_overlap_mentioned_p (reg, src))
1937 	      return 0;
1938 	}
1939 
1940   /* If INSN contains anything volatile, or is an `asm' (whether volatile
1941      or not), reject, unless nothing volatile comes between it and I3.  */
1942 
1943   if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
1944     {
1945       /* Make sure neither succ nor succ2 contains a volatile reference.  */
1946       if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
1947 	return 0;
1948       if (succ != 0 && volatile_refs_p (PATTERN (succ)))
1949 	return 0;
1950       /* We'll check insns between INSN and I3 below.  */
1951     }
1952 
1953   /* If INSN is an asm, and DEST is a hard register, reject, since it has
1954      to be an explicit register variable, and was chosen for a reason.  */
1955 
1956   if (GET_CODE (src) == ASM_OPERANDS
1957       && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
1958     return 0;
1959 
1960   /* If INSN contains volatile references (specifically volatile MEMs),
1961      we cannot combine across any other volatile references.
1962      Even if INSN doesn't contain volatile references, any intervening
1963      volatile insn might affect machine state.  */
1964 
1965   is_volatile_p = volatile_refs_p (PATTERN (insn))
1966     ? volatile_refs_p
1967     : volatile_insn_p;
1968 
1969   for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
1970     if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
1971       return 0;
1972 
1973   /* If INSN contains an autoincrement or autodecrement, make sure that
1974      register is not used between there and I3, and not already used in
1975      I3 either.  Neither must it be used in PRED or SUCC, if they exist.
1976      Also insist that I3 not be a jump; if it were one
1977      and the incremented register were spilled, we would lose.  */
1978 
1979 #ifdef AUTO_INC_DEC
1980   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1981     if (REG_NOTE_KIND (link) == REG_INC
1982 	&& (JUMP_P (i3)
1983 	    || reg_used_between_p (XEXP (link, 0), insn, i3)
1984 	    || (pred != NULL_RTX
1985 		&& reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
1986 	    || (pred2 != NULL_RTX
1987 		&& reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
1988 	    || (succ != NULL_RTX
1989 		&& reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
1990 	    || (succ2 != NULL_RTX
1991 		&& reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
1992 	    || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
1993       return 0;
1994 #endif
1995 
1996 #ifdef HAVE_cc0
1997   /* Don't combine an insn that follows a CC0-setting insn.
1998      An insn that uses CC0 must not be separated from the one that sets it.
1999      We do, however, allow I2 to follow a CC0-setting insn if that insn
2000      is passed as I1; in that case it will be deleted also.
2001      We also allow combining in this case if all the insns are adjacent
2002      because that would leave the two CC0 insns adjacent as well.
2003      It would be more logical to test whether CC0 occurs inside I1 or I2,
2004      but that would be much slower, and this ought to be equivalent.  */
2005 
2006   p = prev_nonnote_insn (insn);
2007   if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2008       && ! all_adjacent)
2009     return 0;
2010 #endif
2011 
2012   /* If we get here, we have passed all the tests and the combination is
2013      to be allowed.  */
2014 
2015   *pdest = dest;
2016   *psrc = src;
2017 
2018   return 1;
2019 }
2020 
2021 /* LOC is the location within I3 that contains its pattern or the component
2022    of a PARALLEL of the pattern.  We validate that it is valid for combining.
2023 
2024    One problem is if I3 modifies its output, as opposed to replacing it
2025    entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2026    doing so would produce an insn that is not equivalent to the original insns.
2027 
2028    Consider:
2029 
2030 	 (set (reg:DI 101) (reg:DI 100))
2031 	 (set (subreg:SI (reg:DI 101) 0) <foo>)
2032 
2033    This is NOT equivalent to:
2034 
2035 	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2036 		    (set (reg:DI 101) (reg:DI 100))])
2037 
2038    Not only does this modify 100 (in which case it might still be valid
2039    if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2040 
2041    We can also run into a problem if I2 sets a register that I1
2042    uses and I1 gets directly substituted into I3 (not via I2).  In that
2043    case, we would be getting the wrong value of I2DEST into I3, so we
2044    must reject the combination.  This case occurs when I2 and I1 both
2045    feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2046    If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2047    of a SET must prevent combination from occurring.  The same situation
2048    can occur for I0, in which case I0_NOT_IN_SRC is set.
2049 
2050    Before doing the above check, we first try to expand a field assignment
2051    into a set of logical operations.
2052 
2053    If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2054    we place a register that is both set and used within I3.  If more than one
2055    such register is detected, we fail.
2056 
2057    Return 1 if the combination is valid, zero otherwise.  */
2058 
2059 static int
2060 combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2061 		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2062 {
2063   rtx x = *loc;
2064 
2065   if (GET_CODE (x) == SET)
2066     {
2067       rtx set = x;
2068       rtx dest = SET_DEST (set);
2069       rtx src = SET_SRC (set);
2070       rtx inner_dest = dest;
2071       rtx subdest;
2072 
2073       while (GET_CODE (inner_dest) == STRICT_LOW_PART
2074 	     || GET_CODE (inner_dest) == SUBREG
2075 	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
2076 	inner_dest = XEXP (inner_dest, 0);
2077 
2078       /* Check for the case where I3 modifies its output, as discussed
2079 	 above.  We don't want to prevent pseudos from being combined
2080 	 into the address of a MEM, so only prevent the combination if
2081 	 i1 or i2 set the same MEM.  */
2082       if ((inner_dest != dest
2083 	   && (!MEM_P (inner_dest)
2084 	    || rtx_equal_p (i2dest, inner_dest)
2085 	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
2086 	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2087 	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
2088 	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2089 	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2090 
2091 	  /* This is the same test done in can_combine_p except we can't test
2092 	     all_adjacent; we don't have to, since this instruction will stay
2093 	     in place, thus we are not considering increasing the lifetime of
2094 	     INNER_DEST.
2095 
2096 	     Also, if this insn sets a function argument, combining it with
2097 	     something that might need a spill could clobber a previous
2098 	     function argument; the all_adjacent test in can_combine_p also
2099 	     checks this; here, we do a more specific test for this case.  */
2100 
2101 	  || (REG_P (inner_dest)
2102 	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2103 	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2104 					GET_MODE (inner_dest))))
2105 	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2106 	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2107 	return 0;
2108 
2109       /* If DEST is used in I3, it is being killed in this insn, so
2110 	 record that for later.  We have to consider paradoxical
2111 	 subregs here, since they kill the whole register, but we
2112 	 ignore partial subregs, STRICT_LOW_PART, etc.
2113 	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2114 	 STACK_POINTER_REGNUM, since these are always considered to be
2115 	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
2116       subdest = dest;
2117       if (GET_CODE (subdest) == SUBREG
2118 	  && (GET_MODE_SIZE (GET_MODE (subdest))
2119 	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2120 	subdest = SUBREG_REG (subdest);
2121       if (pi3dest_killed
2122 	  && REG_P (subdest)
2123 	  && reg_referenced_p (subdest, PATTERN (i3))
2124 	  && REGNO (subdest) != FRAME_POINTER_REGNUM
2125 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2126 	  && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
2127 #endif
2128 #if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
2129 	  && (REGNO (subdest) != ARG_POINTER_REGNUM
2130 	      || ! fixed_regs [REGNO (subdest)])
2131 #endif
2132 	  && REGNO (subdest) != STACK_POINTER_REGNUM)
2133 	{
2134 	  if (*pi3dest_killed)
2135 	    return 0;
2136 
2137 	  *pi3dest_killed = subdest;
2138 	}
2139     }
2140 
2141   else if (GET_CODE (x) == PARALLEL)
2142     {
2143       int i;
2144 
2145       for (i = 0; i < XVECLEN (x, 0); i++)
2146 	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2147 				i1_not_in_src, i0_not_in_src, pi3dest_killed))
2148 	  return 0;
2149     }
2150 
2151   return 1;
2152 }
2153 
2154 /* Return 1 if X is an arithmetic expression that contains a multiplication
2155    or division.  We don't count multiplications by powers of two here.  */
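/* For instance, (plus (mult (reg) (const_int 5)) (reg)) contains such a
   multiplication, while (mult (reg) (const_int 4)) does not, because 4
   is a power of two.  */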
2156 
2157 static int
2158 contains_muldiv (rtx x)
2159 {
2160   switch (GET_CODE (x))
2161     {
2162     case MOD:  case DIV:  case UMOD:  case UDIV:
2163       return 1;
2164 
2165     case MULT:
2166       return ! (CONST_INT_P (XEXP (x, 1))
2167 		&& exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2168     default:
2169       if (BINARY_P (x))
2170 	return contains_muldiv (XEXP (x, 0))
2171 	    || contains_muldiv (XEXP (x, 1));
2172 
2173       if (UNARY_P (x))
2174 	return contains_muldiv (XEXP (x, 0));
2175 
2176       return 0;
2177     }
2178 }
2179 
2180 /* Determine whether INSN can be used in a combination.  Return nonzero if
2181    not.  This is used in try_combine to detect early some cases where we
2182    can't perform combinations.  */
2183 
2184 static int
2185 cant_combine_insn_p (rtx insn)
2186 {
2187   rtx set;
2188   rtx src, dest;
2189 
2190   /* If this isn't really an insn, we can't do anything.
2191      This can occur when flow deletes an insn that it has merged into an
2192      auto-increment address.  */
2193   if (! INSN_P (insn))
2194     return 1;
2195 
2196   /* Never combine loads and stores involving hard regs that are likely
2197      to be spilled.  The register allocator can usually handle such
2198      reg-reg moves by tying.  If we allow the combiner to make
2199      substitutions of likely-spilled regs, reload might die.
2200      As an exception, we allow combinations involving fixed regs; these are
2201      not available to the register allocator so there's no risk involved.  */
2202 
2203   set = single_set (insn);
2204   if (! set)
2205     return 0;
2206   src = SET_SRC (set);
2207   dest = SET_DEST (set);
2208   if (GET_CODE (src) == SUBREG)
2209     src = SUBREG_REG (src);
2210   if (GET_CODE (dest) == SUBREG)
2211     dest = SUBREG_REG (dest);
2212   if (REG_P (src) && REG_P (dest)
2213       && ((HARD_REGISTER_P (src)
2214 	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2215 	   && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2216 	  || (HARD_REGISTER_P (dest)
2217 	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2218 	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2219     return 1;
2220 
2221   return 0;
2222 }
2223 
2224 struct likely_spilled_retval_info
2225 {
2226   unsigned regno, nregs;
2227   unsigned mask;
2228 };
2229 
2230 /* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
2231    hard registers that are known to be written to / clobbered in full.  */
2232 static void
2233 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2234 {
2235   struct likely_spilled_retval_info *const info =
2236     (struct likely_spilled_retval_info *) data;
2237   unsigned regno, nregs;
2238   unsigned new_mask;
2239 
2240   if (!REG_P (XEXP (set, 0)))
2241     return;
2242   regno = REGNO (x);
2243   if (regno >= info->regno + info->nregs)
2244     return;
2245   nregs = hard_regno_nregs[regno][GET_MODE (x)];
2246   if (regno + nregs <= info->regno)
2247     return;
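  /* Set the low NREGS bits.  The expression is written as
     (2U << (nregs - 1)) - 1 rather than (1U << nregs) - 1 so that the
     shift count stays below the width of unsigned int even when NREGS
     is 32; e.g. NREGS == 3 yields a mask of 0x7.  */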
2248   new_mask = (2U << (nregs - 1)) - 1;
2249   if (regno < info->regno)
2250     new_mask >>= info->regno - regno;
2251   else
2252     new_mask <<= regno - info->regno;
2253   info->mask &= ~new_mask;
2254 }
2255 
2256 /* Return nonzero iff part of the return value is live during INSN, and
2257    it is likely spilled.  This can happen when more than one insn is needed
2258    to copy the return value, e.g. when we consider combining into the
2259    second copy insn for a complex value.  */
2260 
2261 static int
2262 likely_spilled_retval_p (rtx insn)
2263 {
2264   rtx use = BB_END (this_basic_block);
2265   rtx reg, p;
2266   unsigned regno, nregs;
2267   /* We assume here that no machine mode needs more than
2268      32 hard registers when the value overlaps with a register
2269      for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
2270   unsigned mask;
2271   struct likely_spilled_retval_info info;
2272 
2273   if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2274     return 0;
2275   reg = XEXP (PATTERN (use), 0);
2276   if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2277     return 0;
2278   regno = REGNO (reg);
2279   nregs = hard_regno_nregs[regno][GET_MODE (reg)];
2280   if (nregs == 1)
2281     return 0;
2282   mask = (2U << (nregs - 1)) - 1;
2283 
2284   /* Disregard parts of the return value that are set later.  */
2285   info.regno = regno;
2286   info.nregs = nregs;
2287   info.mask = mask;
2288   for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2289     if (INSN_P (p))
2290       note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2291   mask = info.mask;
2292 
2293   /* Check if any of the (probably) live return value registers is
2294      likely spilled.  */
2295   nregs--;
2296   do
2297     {
2298       if ((mask & 1 << nregs)
2299 	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2300 	return 1;
2301     } while (nregs--);
2302   return 0;
2303 }
2304 
2305 /* Adjust INSN after we made a change to its destination.
2306 
2307    Changing the destination can invalidate notes that say something about
2308    the results of the insn and a LOG_LINK pointing to the insn.  */
2309 
2310 static void
2311 adjust_for_new_dest (rtx insn)
2312 {
2313   /* For notes, be conservative and simply remove them.  */
2314   remove_reg_equal_equiv_notes (insn);
2315 
2316   /* The new insn will have a destination that was previously the destination
2317      of an insn just above it.  Call distribute_links to make a LOG_LINK from
2318      the next use of that destination.  */
2319   distribute_links (alloc_insn_link (insn, NULL));
2320 
2321   df_insn_rescan (insn);
2322 }
2323 
2324 /* Return TRUE if combine can reuse reg X in mode MODE.
2325    ADDED_SETS is nonzero if the original set is still required.  */
2326 static bool
2327 can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
2328 {
2329   unsigned int regno;
2330 
2331   if (!REG_P (x))
2332     return false;
2333 
2334   regno = REGNO (x);
2335   /* Allow hard registers if the new mode is legal, and occupies no more
2336      registers than the old mode.  */
2337   if (regno < FIRST_PSEUDO_REGISTER)
2338     return (HARD_REGNO_MODE_OK (regno, mode)
2339 	    && (hard_regno_nregs[regno][GET_MODE (x)]
2340 		>= hard_regno_nregs[regno][mode]));
2341 
2342   /* Or a pseudo that is only used once.  */
2343   return (REG_N_SETS (regno) == 1 && !added_sets
2344 	  && !REG_USERVAR_P (x));
2345 }
2346 
2347 
2348 /* Check whether X, the destination of a set, refers to part of
2349    the register specified by REG.  */
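/* For example, both (subreg:HI (reg:SI 100) 0) and
   (strict_low_part (subreg:HI (reg:SI 100) 0)) refer to a subword of
   register 100, whereas a destination of (reg:SI 100) itself does not.  */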
2350 
2351 static bool
2352 reg_subword_p (rtx x, rtx reg)
2353 {
2354   /* Check that reg is an integer mode register.  */
2355   if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2356     return false;
2357 
2358   if (GET_CODE (x) == STRICT_LOW_PART
2359       || GET_CODE (x) == ZERO_EXTRACT)
2360     x = XEXP (x, 0);
2361 
2362   return GET_CODE (x) == SUBREG
2363 	 && SUBREG_REG (x) == reg
2364 	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2365 }
2366 
2367 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2368    Note that the INSN should be deleted *after* removing dead edges, so
2369    that the kept edge is the fallthrough edge for a (set (pc) (pc))
2370    but not for a (set (pc) (label_ref FOO)).  */
2371 
2372 static void
2373 update_cfg_for_uncondjump (rtx insn)
2374 {
2375   basic_block bb = BLOCK_FOR_INSN (insn);
2376   gcc_assert (BB_END (bb) == insn);
2377 
2378   purge_dead_edges (bb);
2379 
2380   delete_insn (insn);
2381   if (EDGE_COUNT (bb->succs) == 1)
2382     {
2383       rtx insn;
2384 
2385       single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2386 
2387       /* Remove barriers from the footer if there are any.  */
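      /* The footer is not part of the ordinary insn stream, so each
	 BARRIER is unlinked by hand: splice the NEXT_INSN/PREV_INSN
	 pointers of its neighbors together, or advance BB_FOOTER when
	 the barrier is the first footer insn.  */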
2388       for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2389 	if (BARRIER_P (insn))
2390 	  {
2391 	    if (PREV_INSN (insn))
2392 	      NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2393 	    else
2394 	      BB_FOOTER (bb) = NEXT_INSN (insn);
2395 	    if (NEXT_INSN (insn))
2396 	      PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2397 	  }
2398 	else if (LABEL_P (insn))
2399 	  break;
2400     }
2401 }
2402 
2403 /* Try to combine the insns I0, I1 and I2 into I3.
2404    Here I0, I1 and I2 appear earlier than I3.
2405    I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2406    I3.
2407 
2408    If we are combining more than two insns and the resulting insn is not
2409    recognized, try splitting it into two insns.  If that happens, I2 and I3
2410    are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2411    Otherwise, I0, I1 and I2 are pseudo-deleted.
2412 
2413    Return 0 if the combination does not work.  Then nothing is changed.
2414    If we did the combination, return the insn at which combine should
2415    resume scanning.
2416 
2417    Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2418    new direct jump instruction.
2419 
2420    LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2421    been I3 passed to an earlier try_combine within the same basic
2422    block.  */
2423 
2424 static rtx
2425 try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
2426 	     rtx last_combined_insn)
2427 {
2428   /* New patterns for I3 and I2, respectively.  */
2429   rtx newpat, newi2pat = 0;
2430   rtvec newpat_vec_with_clobbers = 0;
2431   int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2432   /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2433      dead.  */
2434   int added_sets_0, added_sets_1, added_sets_2;
2435   /* Total number of SETs to put into I3.  */
2436   int total_sets;
2437   /* Nonzero if I2's or I1's body now appears in I3.  */
2438   int i2_is_used = 0, i1_is_used = 0;
2439   /* INSN_CODEs for new I3, new I2, and user of condition code.  */
2440   int insn_code_number, i2_code_number = 0, other_code_number = 0;
2441   /* Contains I3 if the destination of I3 is used in its source, which means
2442      that the old life of I3 is being killed.  If that usage is placed into
2443      I2 and not in I3, a REG_DEAD note must be made.  */
2444   rtx i3dest_killed = 0;
2445   /* SET_DEST and SET_SRC of I2, I1 and I0.  */
2446   rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2447   /* Copy of SET_SRC of I1 and I0, if needed.  */
2448   rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2449   /* Set if I2DEST was reused as a scratch register.  */
2450   bool i2scratch = false;
2451   /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
2452   rtx i0pat = 0, i1pat = 0, i2pat = 0;
2453   /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
2454   int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2455   int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2456   int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2457   int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2458   /* Notes that must be added to REG_NOTES in I3 and I2.  */
2459   rtx new_i3_notes, new_i2_notes;
2460   /* Nonzero if we substituted I3 into I2 instead of the normal case.  */
2461   int i3_subst_into_i2 = 0;
2462   /* Nonzero if I1, I2 or I3 contains a MULT operation.  */
2463   int have_mult = 0;
2464   int swap_i2i3 = 0;
2465   int changed_i3_dest = 0;
2466 
2467   int maxreg;
2468   rtx temp;
2469   struct insn_link *link;
2470   rtx other_pat = 0;
2471   rtx new_other_notes;
2472   int i;
2473 
2474   /* Only try four-insn combinations when there's high likelihood of
2475      success.  Look for simple insns, such as loads of constants or
2476      binary operations involving a constant.  */
2477   if (i0)
2478     {
2479       int i;
2480       int ngood = 0;
2481       int nshift = 0;
2482 
2483       if (!flag_expensive_optimizations)
2484 	return 0;
2485 
2486       for (i = 0; i < 4; i++)
2487 	{
2488 	  rtx insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2489 	  rtx set = single_set (insn);
2490 	  rtx src;
2491 	  if (!set)
2492 	    continue;
2493 	  src = SET_SRC (set);
2494 	  if (CONSTANT_P (src))
2495 	    {
2496 	      ngood += 2;
2497 	      break;
2498 	    }
2499 	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2500 	    ngood++;
2501 	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2502 		   || GET_CODE (src) == LSHIFTRT)
2503 	    nshift++;
2504 	}
2505       if (ngood < 2 && nshift < 2)
2506 	return 0;
2507     }
2508 
2509   /* Exit early if one of the insns involved can't be used for
2510      combinations.  */
2511   if (cant_combine_insn_p (i3)
2512       || cant_combine_insn_p (i2)
2513       || (i1 && cant_combine_insn_p (i1))
2514       || (i0 && cant_combine_insn_p (i0))
2515       || likely_spilled_retval_p (i3))
2516     return 0;
2517 
2518   combine_attempts++;
2519   undobuf.other_insn = 0;
2520 
2521   /* Reset the hard register usage information.  */
2522   CLEAR_HARD_REG_SET (newpat_used_regs);
2523 
2524   if (dump_file && (dump_flags & TDF_DETAILS))
2525     {
2526       if (i0)
2527 	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2528 		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2529       else if (i1)
2530 	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2531 		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2532       else
2533 	fprintf (dump_file, "\nTrying %d -> %d:\n",
2534 		 INSN_UID (i2), INSN_UID (i3));
2535     }
2536 
2537   /* If multiple insns feed into one of I2 or I3, they can be in any
2538      order.  To simplify the code below, reorder them in sequence.  */
2539   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2540     temp = i2, i2 = i0, i0 = temp;
2541   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2542     temp = i1, i1 = i0, i0 = temp;
2543   if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2544     temp = i1, i1 = i2, i2 = temp;
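  /* From here on, the insns are sorted by LUID: I0 (when present) comes
     before I1, which comes before I2.  */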
2545 
2546   added_links_insn = 0;
2547 
2548   /* First check for one important special case that the code below will
2549      not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
2550      and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
2551      we may be able to replace that destination with the destination of I3.
2552      This occurs in the common code where we compute both a quotient and
2553      remainder into a structure, in which case we want to do the computation
2554      directly into the structure to avoid register-register copies.
2555 
2556      Note that this case handles both multiple sets in I2 and cases
2557      where I2 has a number of CLOBBERs inside the PARALLEL.
2558 
2559      We make very conservative checks below and only try to handle the
2560      most common cases of this.  For example, we only handle the case
2561      where I2 and I3 are adjacent to avoid making difficult register
2562      usage tests.  */
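  /* As a hypothetical sketch, with I2 computing both quotient and
     remainder and I3 storing the remainder:

	 I2: (parallel [(set (reg 70) (div:SI (reg 68) (reg 69)))
			(set (reg 71) (mod:SI (reg 68) (reg 69)))])
	 I3: (set (mem:SI (reg 65)) (reg 71))

     the MEM destination of I3 can replace (reg 71) in I2, provided
     (reg 71) dies in I3.  */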
2563 
2564   if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2565       && REG_P (SET_SRC (PATTERN (i3)))
2566       && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2567       && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2568       && GET_CODE (PATTERN (i2)) == PARALLEL
2569       && ! side_effects_p (SET_DEST (PATTERN (i3)))
2570       /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2571 	 below would need to check what is inside (and reg_overlap_mentioned_p
2572 	 doesn't support those codes anyway).  Don't allow those destinations;
2573 	 the resulting insn isn't likely to be recognized anyway.  */
2574       && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2575       && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2576       && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2577 				    SET_DEST (PATTERN (i3)))
2578       && next_active_insn (i2) == i3)
2579     {
2580       rtx p2 = PATTERN (i2);
2581 
2582       /* Make sure that the destination of I3,
2583 	 which we are going to substitute into one output of I2,
2584 	 is not used within another output of I2.  We must avoid making this:
2585 	 (parallel [(set (mem (reg 69)) ...)
2586 		    (set (reg 69) ...)])
2587 	 which is not well-defined as to order of actions.
2588 	 (Besides, reload can't handle output reloads for this.)
2589 
2590 	 The problem can also happen if the dest of I3 is a memory ref,
2591 	 if another dest in I2 is an indirect memory ref.  */
2592       for (i = 0; i < XVECLEN (p2, 0); i++)
2593 	if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2594 	     || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2595 	    && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2596 					SET_DEST (XVECEXP (p2, 0, i))))
2597 	  break;
2598 
2599       if (i == XVECLEN (p2, 0))
2600 	for (i = 0; i < XVECLEN (p2, 0); i++)
2601 	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2602 	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2603 	    {
2604 	      combine_merges++;
2605 
2606 	      subst_insn = i3;
2607 	      subst_low_luid = DF_INSN_LUID (i2);
2608 
2609 	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
2610 	      i2src = SET_SRC (XVECEXP (p2, 0, i));
2611 	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
2612 	      i2dest_killed = dead_or_set_p (i2, i2dest);
2613 
2614 	      /* Replace the dest in I2 with our dest and make the resulting
2615 		 insn the new pattern for I3.  Then skip to where we validate
2616 		 the pattern.  Everything was set up above.  */
2617 	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2618 	      newpat = p2;
2619 	      i3_subst_into_i2 = 1;
2620 	      goto validate_replacement;
2621 	    }
2622     }
2623 
2624   /* If I2 is setting a pseudo to a constant and I3 is setting some
2625      sub-part of it to another constant, merge them by making a new
2626      constant.  */
2627   if (i1 == 0
2628       && (temp = single_set (i2)) != 0
2629       && CONST_SCALAR_INT_P (SET_SRC (temp))
2630       && GET_CODE (PATTERN (i3)) == SET
2631       && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2632       && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp)))
2633     {
2634       rtx dest = SET_DEST (PATTERN (i3));
2635       int offset = -1;
2636       int width = 0;
2637 
2638       /* There are no explicit tests to make sure that this is not a
2639 	 float, but there is code here that would not be correct if it
2640 	 were.  */
2641       gcc_assert (GET_MODE_CLASS (GET_MODE (SET_SRC (temp))) != MODE_FLOAT);
2642 
2643       if (GET_CODE (dest) == ZERO_EXTRACT)
2644 	{
2645 	  if (CONST_INT_P (XEXP (dest, 1))
2646 	      && CONST_INT_P (XEXP (dest, 2)))
2647 	    {
2648 	      width = INTVAL (XEXP (dest, 1));
2649 	      offset = INTVAL (XEXP (dest, 2));
2650 	      dest = XEXP (dest, 0);
2651 	      if (BITS_BIG_ENDIAN)
2652 		offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
2653 	    }
2654 	}
2655       else
2656 	{
2657 	  if (GET_CODE (dest) == STRICT_LOW_PART)
2658 	    dest = XEXP (dest, 0);
2659 	  width = GET_MODE_PRECISION (GET_MODE (dest));
2660 	  offset = 0;
2661 	}
2662 
2663       if (offset >= 0)
2664 	{
2665 	  /* If this is the low part, we're done.  */
2666 	  if (subreg_lowpart_p (dest))
2667 	    ;
2668 	  /* Handle the case where inner is twice the size of outer.  */
2669 	  else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
2670 		   == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2671 	    offset += GET_MODE_PRECISION (GET_MODE (dest));
2672 	  /* Otherwise give up for now.  */
2673 	  else
2674 	    offset = -1;
2675 	}
2676 
2677       if (offset >= 0
2678 	  && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
2679 	      <= HOST_BITS_PER_DOUBLE_INT))
2680 	{
2681 	  double_int m, o, i;
2682 	  rtx inner = SET_SRC (PATTERN (i3));
2683 	  rtx outer = SET_SRC (temp);
2684 
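	  /* Worked example: if I2 sets (reg:SI) to 0x12345678 and I3
	     stores 0xabcd into its low 16-bit subword (width 16,
	     offset 0), the merged constant computed below is
	     (0x12345678 & ~0xffff) | 0xabcd == 0x1234abcd.  */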
2685 	  o = rtx_to_double_int (outer);
2686 	  i = rtx_to_double_int (inner);
2687 
2688 	  m = double_int::mask (width);
2689 	  i &= m;
2690 	  m = m.llshift (offset, HOST_BITS_PER_DOUBLE_INT);
2691 	  i = i.llshift (offset, HOST_BITS_PER_DOUBLE_INT);
2692 	  o = o.and_not (m) | i;
2693 
2694 	  combine_merges++;
2695 	  subst_insn = i3;
2696 	  subst_low_luid = DF_INSN_LUID (i2);
2697 	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
2698 	  i2dest = SET_DEST (temp);
2699 	  i2dest_killed = dead_or_set_p (i2, i2dest);
2700 
2701 	  /* Replace the source in I2 with the new constant and make the
2702 	     resulting insn the new pattern for I3.  Then skip to where we
2703 	     validate the pattern.  Everything was set up above.  */
2704 	  SUBST (SET_SRC (temp),
2705 		 immed_double_int_const (o, GET_MODE (SET_DEST (temp))));
2706 
2707 	  newpat = PATTERN (i2);
2708 
2709           /* The dest of I3 has been replaced with the dest of I2.  */
2710           changed_i3_dest = 1;
2711 	  goto validate_replacement;
2712 	}
2713     }
2714 
2715 #ifndef HAVE_cc0
2716   /* If we have no I1 and I2 looks like:
2717 	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2718 		   (set Y OP)])
2719      make up a dummy I1 that is
2720 	(set Y OP)
2721      and change I2 to be
2722 	(set (reg:CC X) (compare:CC Y (const_int 0)))
2723 
2724      (We can ignore any trailing CLOBBERs.)
2725 
2726      This undoes a previous combination and allows us to match a branch-and-
2727      decrement insn.  */
2728 
2729   if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
2730       && XVECLEN (PATTERN (i2), 0) >= 2
2731       && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
2732       && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2733 	  == MODE_CC)
2734       && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2735       && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2736       && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
2737       && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)))
2738       && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2739 		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
2740     {
2741       for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
2742 	if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
2743 	  break;
2744 
2745       if (i == 1)
2746 	{
2747 	  /* We make I1 with the same INSN_UID as I2.  This gives it
2748 	     the same DF_INSN_LUID for value tracking.  Our fake I1 will
2749 	     never appear in the insn stream so giving it the same INSN_UID
2750 	     as I2 will not cause a problem.  */
2751 
2752 	  i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
2753 			     BLOCK_FOR_INSN (i2), XVECEXP (PATTERN (i2), 0, 1),
2754 			     INSN_LOCATION (i2), -1, NULL_RTX);
2755 
2756 	  SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2757 	  SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2758 		 SET_DEST (PATTERN (i1)));
2759 	  SUBST_LINK (LOG_LINKS (i2), alloc_insn_link (i1, LOG_LINKS (i2)));
2760 	}
2761     }
2762 #endif
2763 
2764   /* Verify that I2 and I1 are valid for combining.  */
2765   if (! can_combine_p (i2, i3, i0, i1, NULL_RTX, NULL_RTX, &i2dest, &i2src)
2766       || (i1 && ! can_combine_p (i1, i3, i0, NULL_RTX, i2, NULL_RTX,
2767 				 &i1dest, &i1src))
2768       || (i0 && ! can_combine_p (i0, i3, NULL_RTX, NULL_RTX, i1, i2,
2769 				 &i0dest, &i0src)))
2770     {
2771       undo_all ();
2772       return 0;
2773     }
2774 
2775   /* Record whether I2DEST is used in I2SRC and similarly for the other
2776      cases.  Knowing this will help in register status updating below.  */
2777   i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2778   i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2779   i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2780   i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2781   i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2782   i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2783   i2dest_killed = dead_or_set_p (i2, i2dest);
2784   i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2785   i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2786 
2787   /* For the earlier insns, determine which of the subsequent ones they
2788      feed.  */
2789   i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2790   i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2791   i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2792 			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
2793 			     && reg_overlap_mentioned_p (i0dest, i2src))));
2794 
2795   /* Ensure that I3's pattern can be the destination of combines.  */
2796   if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
2797 			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
2798 			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
2799 				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
2800 			  &i3dest_killed))
2801     {
2802       undo_all ();
2803       return 0;
2804     }
2805 
2806   /* See if any of the insns is a MULT operation.  Unless one is, we will
2807      reject a combined insn that contains one, since it must be slower.
2808      Be conservative here.  */
2809   if (GET_CODE (i2src) == MULT
2810       || (i1 != 0 && GET_CODE (i1src) == MULT)
2811       || (i0 != 0 && GET_CODE (i0src) == MULT)
2812       || (GET_CODE (PATTERN (i3)) == SET
2813 	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
2814     have_mult = 1;
2815 
2816   /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
2817      We used to do this EXCEPT in one case: I3 has a post-inc in an
2818      output operand.  However, that exception can give rise to insns like
2819 	mov r3,(r3)+
2820      which is a famous insn on the PDP-11 where the value of r3 used as the
2821      source was model-dependent.  Avoid this sort of thing.  */
2822 
2823 #if 0
2824   if (!(GET_CODE (PATTERN (i3)) == SET
2825 	&& REG_P (SET_SRC (PATTERN (i3)))
2826 	&& MEM_P (SET_DEST (PATTERN (i3)))
2827 	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
2828 	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
2829     /* It's not the exception.  */
2830 #endif
2831 #ifdef AUTO_INC_DEC
2832     {
2833       rtx link;
2834       for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
2835 	if (REG_NOTE_KIND (link) == REG_INC
2836 	    && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
2837 		|| (i1 != 0
2838 		    && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
2839 	  {
2840 	    undo_all ();
2841 	    return 0;
2842 	  }
2843     }
2844 #endif
2845 
2846   /* See if the SETs in I1 or I2 need to be kept around in the merged
2847      instruction: whenever the value set there is still needed past I3.
2848      For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.
2849 
2850      For the SET in I1, we have two cases:  If I1 and I2 independently
2851      feed into I3, the set in I1 needs to be kept around if I1DEST dies
2852      or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
2853      in I1 needs to be kept around unless I1DEST dies or is set in either
2854      I2 or I3.  The same consideration applies to I0.  */
2855 
2856   added_sets_2 = !dead_or_set_p (i3, i2dest);
2857 
2858   if (i1)
2859     added_sets_1 = !(dead_or_set_p (i3, i1dest)
2860 		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
2861   else
2862     added_sets_1 = 0;
2863 
2864   if (i0)
2865     added_sets_0 = !(dead_or_set_p (i3, i0dest)
2866 		      || (i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
2867 		      || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)));
2868   else
2869     added_sets_0 = 0;
2870 
2871   /* We are about to copy insns for the case where they need to be kept
2872      around.  Check that they can be copied in the merged instruction.  */
2873 
2874   if (targetm.cannot_copy_insn_p
2875       && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
2876 	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
2877 	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
2878     {
2879       undo_all ();
2880       return 0;
2881     }
2882 
2883   /* If the set in I2 needs to be kept around, we must make a copy of
2884      PATTERN (I2), so that when we substitute I1SRC for I1DEST in
2885      PATTERN (I2), we are only substituting for the original I1DEST, not into
2886      an already-substituted copy.  This also prevents making self-referential
2887      rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
2888      I2DEST.  */
2889 
2890   if (added_sets_2)
2891     {
2892       if (GET_CODE (PATTERN (i2)) == PARALLEL)
2893 	i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
2894       else
2895 	i2pat = copy_rtx (PATTERN (i2));
2896     }
2897 
2898   if (added_sets_1)
2899     {
2900       if (GET_CODE (PATTERN (i1)) == PARALLEL)
2901 	i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
2902       else
2903 	i1pat = copy_rtx (PATTERN (i1));
2904     }
2905 
2906   if (added_sets_0)
2907     {
2908       if (GET_CODE (PATTERN (i0)) == PARALLEL)
2909 	i0pat = gen_rtx_SET (VOIDmode, i0dest, copy_rtx (i0src));
2910       else
2911 	i0pat = copy_rtx (PATTERN (i0));
2912     }
2913 
2914   combine_merges++;
2915 
2916   /* Substitute in the latest insn for the regs set by the earlier ones.  */
2917 
2918   maxreg = max_reg_num ();
2919 
2920   subst_insn = i3;
2921 
2922 #ifndef HAVE_cc0
2923   /* Many machines that don't use CC0 have insns that can both perform an
2924      arithmetic operation and set the condition code.  These operations will
2925      be represented as a PARALLEL with the first element of the vector
2926      being a COMPARE of an arithmetic operation with the constant zero.
2927      The second element of the vector will set some pseudo to the result
2928      of the same arithmetic operation.  If we simplify the COMPARE, we won't
2929      match such a pattern and so will generate an extra insn.   Here we test
2930      for this case, where both the comparison and the operation result are
2931      needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
2932      I2SRC.  Later we will make the PARALLEL that contains I2.  */
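  /* For instance (a sketch following the pattern above), the PARALLEL
     we want to end up matching might be:

	(parallel [(set (reg:CC X)
			(compare:CC (plus:SI (reg 100) (reg 101))
				    (const_int 0)))
		   (set (reg:SI 102) (plus:SI (reg 100) (reg 101)))])

     The exact CC register and mode are target-specific.  */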
2933 
2934   if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
2935       && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
2936       && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
2937       && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
2938     {
2939       rtx newpat_dest;
2940       rtx *cc_use_loc = NULL, cc_use_insn = NULL_RTX;
2941       rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
2942       enum machine_mode compare_mode, orig_compare_mode;
2943       enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
2944 
2945       newpat = PATTERN (i3);
2946       newpat_dest = SET_DEST (newpat);
2947       compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
2948 
2949       if (undobuf.other_insn == 0
2950 	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
2951 					    &cc_use_insn)))
2952 	{
2953 	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
2954 	  compare_code = simplify_compare_const (compare_code,
2955 						 op0, &op1);
2956 	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
2957 	}
2958 
2959       /* Do the rest only if op1 is const0_rtx, which may be the
2960 	 result of simplification.  */
2961       if (op1 == const0_rtx)
2962 	{
2963 	  /* If a single use of the CC is found, prepare to modify it
2964 	     when SELECT_CC_MODE returns a new CC-class mode, or when
2965 	     the above simplify_compare_const() returned a new comparison
2966 	     operator.  undobuf.other_insn is assigned the CC use insn
2967 	     when modifying it.  */
2968 	  if (cc_use_loc)
2969 	    {
2970 #ifdef SELECT_CC_MODE
2971 	      enum machine_mode new_mode
2972 		= SELECT_CC_MODE (compare_code, op0, op1);
2973 	      if (new_mode != orig_compare_mode
2974 		  && can_change_dest_mode (SET_DEST (newpat),
2975 					   added_sets_2, new_mode))
2976 		{
2977 		  unsigned int regno = REGNO (newpat_dest);
2978 		  compare_mode = new_mode;
2979 		  if (regno < FIRST_PSEUDO_REGISTER)
2980 		    newpat_dest = gen_rtx_REG (compare_mode, regno);
2981 		  else
2982 		    {
2983 		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
2984 		      newpat_dest = regno_reg_rtx[regno];
2985 		    }
2986 		}
2987 #endif
2988 	      /* Cases for modifying the CC-using comparison.  */
2989 	      if (compare_code != orig_compare_code
2990 		  /* ??? Do we need to verify the zero rtx?  */
2991 		  && XEXP (*cc_use_loc, 1) == const0_rtx)
2992 		{
2993 		  /* Replace cc_use_loc with entire new RTX.  */
2994 		  SUBST (*cc_use_loc,
2995 			 gen_rtx_fmt_ee (compare_code, compare_mode,
2996 					 newpat_dest, const0_rtx));
2997 		  undobuf.other_insn = cc_use_insn;
2998 		}
2999 	      else if (compare_mode != orig_compare_mode)
3000 		{
3001 		  /* Just replace the CC reg with a new mode.  */
3002 		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3003 		  undobuf.other_insn = cc_use_insn;
3004 		}
3005 	    }
3006 
3007 	  /* Now we modify the current newpat:
3008 	     First, SET_DEST(newpat) is updated if the CC mode has been
3009 	     altered. For targets without SELECT_CC_MODE, this should be
3010 	     optimized away.  */
3011 	  if (compare_mode != orig_compare_mode)
3012 	    SUBST (SET_DEST (newpat), newpat_dest);
3013 	  /* This is always done to propagate i2src into newpat.  */
3014 	  SUBST (SET_SRC (newpat),
3015 		 gen_rtx_COMPARE (compare_mode, op0, op1));
3016 	  /* Create new version of i2pat if needed; the below PARALLEL
3017 	     creation needs this to work correctly.  */
3018 	  if (! rtx_equal_p (i2src, op0))
3019 	    i2pat = gen_rtx_SET (VOIDmode, i2dest, op0);
3020 	  i2_is_used = 1;
3021 	}
3022     }
3023 #endif
3024 
3025   if (i2_is_used == 0)
3026     {
3027       /* It is possible that the source of I2 or I1 may be performing
3028 	 an unneeded operation, such as a ZERO_EXTEND of something
3029 	 that is known to have the high part zero.  Handle that case
3030 	 by letting subst look at the inner insns.
3031 
3032 	 Another way to do this would be to have a function that tries
3033 	 to simplify a single insn instead of merging two or more
3034 	 insns.  We don't do this because of the potential of infinite
3035 	 loops and because of the potential extra memory required.
3036 	 However, doing it the way we are is a bit of a kludge and
3037 	 doesn't catch all cases.
3038 
3039 	 But only do this if -fexpensive-optimizations since it slows
3040 	 things down and doesn't usually win.
3041 
3042 	 This is not done in the COMPARE case above because the
3043 	 unmodified I2PAT is used in the PARALLEL and so a pattern
3044 	 with a modified I2SRC would not match.  */
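
	   /* A hypothetical example: if I2SRC contains
	      (zero_extend:SI (subreg:HI (reg:SI 100) 0)) and nonzero_bits
	      shows that the upper bits of (reg:SI 100) are already zero,
	      the simplifying subst calls below can reduce it to
	      (reg:SI 100).  */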
3045 
3046       if (flag_expensive_optimizations)
3047 	{
3048 	  /* Pass pc_rtx so no substitutions are done, just
3049 	     simplifications.  */
3050 	  if (i1)
3051 	    {
3052 	      subst_low_luid = DF_INSN_LUID (i1);
3053 	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3054 	    }
3055 
3056 	  subst_low_luid = DF_INSN_LUID (i2);
3057 	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3058 	}
3059 
3060       n_occurrences = 0;		/* `subst' counts here */
3061       subst_low_luid = DF_INSN_LUID (i2);
3062 
3063       /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3064 	 copy of I2SRC each time we substitute it, in order to avoid creating
3065 	 self-referential RTL when we will be substituting I1SRC for I1DEST
3066 	 later.  Likewise if I0 feeds into I2, either directly or indirectly
3067 	 through I1, and I0DEST is in I0SRC.  */
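	   /* For instance, a hypothetical I1 of the form
	      (set (reg:SI 101) (plus:SI (reg:SI 101) (const_int 1)))
	      is the I1DEST-in-I1SRC situation meant here.  */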
3068       newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3069 		      (i1_feeds_i2_n && i1dest_in_i1src)
3070 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3071 			  && i0dest_in_i0src));
3072       substed_i2 = 1;
3073 
3074       /* Record whether I2's body now appears within I3's body.  */
3075       i2_is_used = n_occurrences;
3076     }
3077 
3078   /* If we already got a failure, don't try to do more.  Otherwise, try to
3079      substitute I1 if we have it.  */
3080 
3081   if (i1 && GET_CODE (newpat) != CLOBBER)
3082     {
3083       /* Check that an autoincrement side-effect on I1 has not been lost.
3084 	 This happens if I1DEST is mentioned in I2 and dies there, and
3085 	 has disappeared from the new pattern.  */
3086       if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3087 	   && i1_feeds_i2_n
3088 	   && dead_or_set_p (i2, i1dest)
3089 	   && !reg_overlap_mentioned_p (i1dest, newpat))
3090 	   /* Before we can do this substitution, we must redo the test done
3091 	      above (see detailed comments there) that ensures I1DEST isn't
3092 	      mentioned in any SETs in NEWPAT that are field assignments.  */
3093 	  || !combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX, NULL_RTX,
3094 				0, 0, 0))
3095 	{
3096 	  undo_all ();
3097 	  return 0;
3098 	}
3099 
3100       n_occurrences = 0;
3101       subst_low_luid = DF_INSN_LUID (i1);
3102 
3103       /* If the following substitution will modify I1SRC, make a copy of it
3104 	 for the case where it is substituted for I1DEST in I2PAT later.  */
3105       if (added_sets_2 && i1_feeds_i2_n)
3106 	i1src_copy = copy_rtx (i1src);
3107 
3108       /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3109 	 copy of I1SRC each time we substitute it, in order to avoid creating
3110 	 self-referential RTL when we will be substituting I0SRC for I0DEST
3111 	 later.  */
3112       newpat = subst (newpat, i1dest, i1src, 0, 0,
3113 		      i0_feeds_i1_n && i0dest_in_i0src);
3114       substed_i1 = 1;
3115 
3116       /* Record whether I1's body now appears within I3's body.  */
3117       i1_is_used = n_occurrences;
3118     }
3119 
3120   /* Likewise for I0 if we have it.  */
3121 
3122   if (i0 && GET_CODE (newpat) != CLOBBER)
3123     {
3124       if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3125 	   && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3126 	       || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3127 	   && !reg_overlap_mentioned_p (i0dest, newpat))
3128 	  || !combinable_i3pat (NULL_RTX, &newpat, i0dest, NULL_RTX, NULL_RTX,
3129 				0, 0, 0))
3130 	{
3131 	  undo_all ();
3132 	  return 0;
3133 	}
3134 
3135       /* If the following substitution will modify I0SRC, make a copy of it
3136 	 for the case where it is substituted for I0DEST in I1PAT later.  */
3137       if (added_sets_1 && i0_feeds_i1_n)
3138 	i0src_copy = copy_rtx (i0src);
3139       /* And a copy for I0DEST in I2PAT substitution.  */
3140       if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3141 			   || (i0_feeds_i2_n)))
3142 	i0src_copy2 = copy_rtx (i0src);
3143 
3144       n_occurrences = 0;
3145       subst_low_luid = DF_INSN_LUID (i0);
3146       newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3147       substed_i0 = 1;
3148     }
3149 
3150   /* Fail if an autoincrement side-effect has been duplicated.  Be careful
3151      to count all the ways that I2SRC and I1SRC can be used.  */
3152   if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3153        && i2_is_used + added_sets_2 > 1)
3154       || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3155 	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3156 	      > 1))
3157       || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3158 	  && (n_occurrences + added_sets_0
3159 	      + (added_sets_1 && i0_feeds_i1_n)
3160 	      + (added_sets_2 && i0_feeds_i2_n)
3161 	      > 1))
3162       /* Fail if we tried to make a new register.  */
3163       || max_reg_num () != maxreg
3164       /* Fail if we couldn't do something and have a CLOBBER.  */
3165       || GET_CODE (newpat) == CLOBBER
3166       /* Fail if this new pattern is a MULT and we didn't have one before
3167 	 at the outer level.  */
3168       || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3169 	  && ! have_mult))
3170     {
3171       undo_all ();
3172       return 0;
3173     }
3174 
3175   /* If the actions of the earlier insns must be kept
3176      in addition to substituting them into the latest one,
3177      we must make a new PARALLEL for the latest insn
3178      to hold the additional SETs.  */
3179 
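       /* An illustrative sketch with hypothetical pseudos: if I2's
	  destination is still live after I3, the result can be

	      (parallel [(set (reg:SI 103) <substituted I3 source>)
			 (set (reg:SI 101) <I2SRC>)])

	  where the second SET keeps I2's assignment.  */
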
3180   if (added_sets_0 || added_sets_1 || added_sets_2)
3181     {
3182       int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3183       combine_extras++;
3184 
3185       if (GET_CODE (newpat) == PARALLEL)
3186 	{
3187 	  rtvec old = XVEC (newpat, 0);
3188 	  total_sets = XVECLEN (newpat, 0) + extra_sets;
3189 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3190 	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3191 		  sizeof (old->elem[0]) * old->num_elem);
3192 	}
3193       else
3194 	{
3195 	  rtx old = newpat;
3196 	  total_sets = 1 + extra_sets;
3197 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3198 	  XVECEXP (newpat, 0, 0) = old;
3199 	}
3200 
3201       if (added_sets_0)
3202 	XVECEXP (newpat, 0, --total_sets) = i0pat;
3203 
3204       if (added_sets_1)
3205 	{
3206 	  rtx t = i1pat;
3207 	  if (i0_feeds_i1_n)
3208 	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3209 
3210 	  XVECEXP (newpat, 0, --total_sets) = t;
3211 	}
3212       if (added_sets_2)
3213 	{
3214 	  rtx t = i2pat;
3215 	  if (i1_feeds_i2_n)
3216 	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3217 		       i0_feeds_i1_n && i0dest_in_i0src);
3218 	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3219 	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3220 
3221 	  XVECEXP (newpat, 0, --total_sets) = t;
3222 	}
3223     }
3224 
3225  validate_replacement:
3226 
3227   /* Note which hard regs this insn has as inputs.  */
3228   mark_used_regs_combine (newpat);
3229 
3230   /* If recog_for_combine fails, it strips existing clobbers.  If we'll
3231      consider splitting this pattern, we might need these clobbers.  */
3232   if (i1 && GET_CODE (newpat) == PARALLEL
3233       && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3234     {
3235       int len = XVECLEN (newpat, 0);
3236 
3237       newpat_vec_with_clobbers = rtvec_alloc (len);
3238       for (i = 0; i < len; i++)
3239 	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3240     }
3241 
3242   /* Is the result of combination a valid instruction?  */
3243   insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3244 
3245   /* If the result isn't valid, see if it is a PARALLEL of two SETs where
3246      the second SET's destination is a register that is unused and isn't
3247      marked as an instruction that might trap in an EH region.  In that case,
3248      we just need the first SET.   This can occur when simplifying a divmod
3249      we just need the first SET.  This can occur when simplifying a divmod
3250      splits two independent SETs doesn't handle this case correctly when it
3251      updates the register status.
3252 
3253      It's pointless doing this if we originally had two sets, one from
3254      i3, and one from i2.  Combining then splitting the parallel results
3255      in the original i2 again plus an invalid insn (which we delete).
3256      The net effect is only to move instructions around, which makes
3257      debug info less accurate.
3258 
3259      Also check the case where the first SET's destination is unused.
3260      That would not cause incorrect code, but does cause an unneeded
3261      insn to remain.  */
3262 
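       /* For example (an illustrative sketch with hypothetical pseudos),

	      (parallel [(set (reg:SI 100) (div:SI (reg:SI 1) (reg:SI 2)))
			 (set (reg:SI 101) (mod:SI (reg:SI 1) (reg:SI 2)))])

	  where (reg:SI 101) has a REG_UNUSED note in I3 reduces to just
	  the first SET.  */
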
3263   if (insn_code_number < 0
3264       && !(added_sets_2 && i1 == 0)
3265       && GET_CODE (newpat) == PARALLEL
3266       && XVECLEN (newpat, 0) == 2
3267       && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3268       && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3269       && asm_noperands (newpat) < 0)
3270     {
3271       rtx set0 = XVECEXP (newpat, 0, 0);
3272       rtx set1 = XVECEXP (newpat, 0, 1);
3273 
3274       if (((REG_P (SET_DEST (set1))
3275 	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3276 	   || (GET_CODE (SET_DEST (set1)) == SUBREG
3277 	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3278 	  && insn_nothrow_p (i3)
3279 	  && !side_effects_p (SET_SRC (set1)))
3280 	{
3281 	  newpat = set0;
3282 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3283 	}
3284 
3285       else if (((REG_P (SET_DEST (set0))
3286 		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3287 		|| (GET_CODE (SET_DEST (set0)) == SUBREG
3288 		    && find_reg_note (i3, REG_UNUSED,
3289 				      SUBREG_REG (SET_DEST (set0)))))
3290 	       && insn_nothrow_p (i3)
3291 	       && !side_effects_p (SET_SRC (set0)))
3292 	{
3293 	  newpat = set1;
3294 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3295 
3296 	  if (insn_code_number >= 0)
3297 	    changed_i3_dest = 1;
3298 	}
3299     }
3300 
3301   /* If we were combining three insns and the result is a simple SET
3302      with no ASM_OPERANDS that wasn't recognized, try to split it into two
3303      insns.  There are two ways to do this.  It can be split using a
3304      machine-specific method (like when you have an addition of a large
3305      constant) or by combine in the function find_split_point.  */
3306 
3307   if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3308       && asm_noperands (newpat) < 0)
3309     {
3310       rtx parallel, m_split, *split;
3311 
3312       /* See if the MD file can split NEWPAT.  If it can't, see if letting it
3313 	 use I2DEST as a scratch register will help.  In the latter case,
3314 	 convert I2DEST to the mode of the source of NEWPAT if we can.  */
3315 
3316       m_split = combine_split_insns (newpat, i3);
3317 
3318       /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3319 	 inputs of NEWPAT.  */
3320 
3321       /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3322 	 possible to try that as a scratch reg.  This would require adding
3323 	 more code to make it work though.  */
3324 
3325       if (m_split == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3326 	{
3327 	  enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3328 
3329 	  /* First try to split using the original register as a
3330 	     scratch register.  */
3331 	  parallel = gen_rtx_PARALLEL (VOIDmode,
3332 				       gen_rtvec (2, newpat,
3333 						  gen_rtx_CLOBBER (VOIDmode,
3334 								   i2dest)));
3335 	  m_split = combine_split_insns (parallel, i3);
3336 
3337 	  /* If that didn't work, try changing the mode of I2DEST if
3338 	     we can.  */
3339 	  if (m_split == 0
3340 	      && new_mode != GET_MODE (i2dest)
3341 	      && new_mode != VOIDmode
3342 	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3343 	    {
3344 	      enum machine_mode old_mode = GET_MODE (i2dest);
3345 	      rtx ni2dest;
3346 
3347 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3348 		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3349 	      else
3350 		{
3351 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3352 		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
3353 		}
3354 
3355 	      parallel = (gen_rtx_PARALLEL
3356 			  (VOIDmode,
3357 			   gen_rtvec (2, newpat,
3358 				      gen_rtx_CLOBBER (VOIDmode,
3359 						       ni2dest))));
3360 	      m_split = combine_split_insns (parallel, i3);
3361 
3362 	      if (m_split == 0
3363 		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3364 		{
3365 		  struct undo *buf;
3366 
3367 		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3368 		  buf = undobuf.undos;
3369 		  undobuf.undos = buf->next;
3370 		  buf->next = undobuf.frees;
3371 		  undobuf.frees = buf;
3372 		}
3373 	    }
3374 
3375 	  i2scratch = m_split != 0;
3376 	}
3377 
3378       /* If recog_for_combine has discarded clobbers, try to use them
3379 	 again for the split.  */
3380       if (m_split == 0 && newpat_vec_with_clobbers)
3381 	{
3382 	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3383 	  m_split = combine_split_insns (parallel, i3);
3384 	}
3385 
3386       if (m_split && NEXT_INSN (m_split) == NULL_RTX)
3387 	{
3388 	  m_split = PATTERN (m_split);
3389 	  insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes);
3390 	  if (insn_code_number >= 0)
3391 	    newpat = m_split;
3392 	}
3393       else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
3394 	       && (next_nonnote_nondebug_insn (i2) == i3
3395 		   || ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
3396 	{
3397 	  rtx i2set, i3set;
3398 	  rtx newi3pat = PATTERN (NEXT_INSN (m_split));
3399 	  newi2pat = PATTERN (m_split);
3400 
3401 	  i3set = single_set (NEXT_INSN (m_split));
3402 	  i2set = single_set (m_split);
3403 
3404 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3405 
3406 	  /* If I2 or I3 has multiple SETs, we won't know how to track
3407 	     register status, so don't use these insns.  If I2's destination
3408 	     is used between I2 and I3, we also can't use these insns.  */
3409 
3410 	  if (i2_code_number >= 0 && i2set && i3set
3411 	      && (next_nonnote_nondebug_insn (i2) == i3
3412 		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3413 	    insn_code_number = recog_for_combine (&newi3pat, i3,
3414 						  &new_i3_notes);
3415 	  if (insn_code_number >= 0)
3416 	    newpat = newi3pat;
3417 
3418 	  /* It is possible that both insns now set the destination of I3.
3419 	     If so, we must show an extra use of it.  */
3420 
3421 	  if (insn_code_number >= 0)
3422 	    {
3423 	      rtx new_i3_dest = SET_DEST (i3set);
3424 	      rtx new_i2_dest = SET_DEST (i2set);
3425 
3426 	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3427 		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3428 		     || GET_CODE (new_i3_dest) == SUBREG)
3429 		new_i3_dest = XEXP (new_i3_dest, 0);
3430 
3431 	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3432 		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3433 		     || GET_CODE (new_i2_dest) == SUBREG)
3434 		new_i2_dest = XEXP (new_i2_dest, 0);
3435 
3436 	      if (REG_P (new_i3_dest)
3437 		  && REG_P (new_i2_dest)
3438 		  && REGNO (new_i3_dest) == REGNO (new_i2_dest))
3439 		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3440 	    }
3441 	}
3442 
3443       /* If we can split it and use I2DEST, go ahead and see if that
3444 	 helps things be recognized.  Verify that none of the registers
3445 	 are set between I2 and I3.  */
3446       if (insn_code_number < 0
3447           && (split = find_split_point (&newpat, i3, false)) != 0
3448 #ifdef HAVE_cc0
3449 	  && REG_P (i2dest)
3450 #endif
3451 	  /* We need I2DEST in the proper mode.  If it is a hard register
3452 	     or the only use of a pseudo, we can change its mode.
3453 	     Make sure we don't change a hard register to have a mode that
3454 	     isn't valid for it, or change the number of registers.  */
3455 	  && (GET_MODE (*split) == GET_MODE (i2dest)
3456 	      || GET_MODE (*split) == VOIDmode
3457 	      || can_change_dest_mode (i2dest, added_sets_2,
3458 				       GET_MODE (*split)))
3459 	  && (next_nonnote_nondebug_insn (i2) == i3
3460 	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3461 	  /* We can't overwrite I2DEST if its value is still used by
3462 	     NEWPAT.  */
3463 	  && ! reg_referenced_p (i2dest, newpat))
3464 	{
3465 	  rtx newdest = i2dest;
3466 	  enum rtx_code split_code = GET_CODE (*split);
3467 	  enum machine_mode split_mode = GET_MODE (*split);
3468 	  bool subst_done = false;
3469 	  newi2pat = NULL_RTX;
3470 
3471 	  i2scratch = true;
3472 
3473 	  /* *SPLIT may be part of I2SRC, so make sure we have the
3474 	     original expression around for later debug processing.
3475 	     We should not need I2SRC any more in other cases.  */
3476 	  if (MAY_HAVE_DEBUG_INSNS)
3477 	    i2src = copy_rtx (i2src);
3478 	  else
3479 	    i2src = NULL;
3480 
3481 	  /* Get NEWDEST as a register in the proper mode.  We have already
3482 	     validated that we can do this.  */
3483 	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3484 	    {
3485 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3486 		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3487 	      else
3488 		{
3489 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3490 		  newdest = regno_reg_rtx[REGNO (i2dest)];
3491 		}
3492 	    }
3493 
3494 	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3495 	     an ASHIFT.  This can occur if it was inside a PLUS and hence
3496 	     appeared to be a memory address.  This is a kludge.  */
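	       /* E.g. (mult:SI (reg:SI 100) (const_int 8)) becomes
		  (ashift:SI (reg:SI 100) (const_int 3)).  */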
3497 	  if (split_code == MULT
3498 	      && CONST_INT_P (XEXP (*split, 1))
3499 	      && INTVAL (XEXP (*split, 1)) > 0
3500 	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3501 	    {
3502 	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
3503 					     XEXP (*split, 0), GEN_INT (i)));
3504 	      /* Update split_code because we may not have a multiply
3505 		 anymore.  */
3506 	      split_code = GET_CODE (*split);
3507 	    }
3508 
3509 #ifdef INSN_SCHEDULING
3510 	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3511 	     be written as a ZERO_EXTEND.  */
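	       /* Concretely, (subreg:SI (mem:QI ADDR) 0), where ADDR is a
		  placeholder, is rewritten as
		  (zero_extend:SI (mem:QI ADDR)), or as a SIGN_EXTEND when
		  LOAD_EXTEND_OP says that is what the load really does.  */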
3512 	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3513 	    {
3514 #ifdef LOAD_EXTEND_OP
3515 	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3516 		 what it really is.  */
3517 	      if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3518 		  == SIGN_EXTEND)
3519 		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3520 						    SUBREG_REG (*split)));
3521 	      else
3522 #endif
3523 		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3524 						    SUBREG_REG (*split)));
3525 	    }
3526 #endif
3527 
3528 	  /* Attempt to split binary operators using arithmetic identities.  */
3529 	  if (BINARY_P (SET_SRC (newpat))
3530 	      && split_mode == GET_MODE (SET_SRC (newpat))
3531 	      && ! side_effects_p (SET_SRC (newpat)))
3532 	    {
3533 	      rtx setsrc = SET_SRC (newpat);
3534 	      enum machine_mode mode = GET_MODE (setsrc);
3535 	      enum rtx_code code = GET_CODE (setsrc);
3536 	      rtx src_op0 = XEXP (setsrc, 0);
3537 	      rtx src_op1 = XEXP (setsrc, 1);
3538 
3539 	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
3540 	      if (rtx_equal_p (src_op0, src_op1))
3541 		{
3542 		  newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
3543 		  SUBST (XEXP (setsrc, 0), newdest);
3544 		  SUBST (XEXP (setsrc, 1), newdest);
3545 		  subst_done = true;
3546 		}
3547 	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
3548 	      else if ((code == PLUS || code == MULT)
3549 		       && GET_CODE (src_op0) == code
3550 		       && GET_CODE (XEXP (src_op0, 0)) == code
3551 		       && (INTEGRAL_MODE_P (mode)
3552 			   || (FLOAT_MODE_P (mode)
3553 			       && flag_unsafe_math_optimizations)))
3554 		{
3555 		  rtx p = XEXP (XEXP (src_op0, 0), 0);
3556 		  rtx q = XEXP (XEXP (src_op0, 0), 1);
3557 		  rtx r = XEXP (src_op0, 1);
3558 		  rtx s = src_op1;
3559 
3560 		  /* Split both "((X op Y) op X) op Y" and
3561 		     "((X op Y) op Y) op X" as "T op T" where T is
3562 		     "X op Y".  */
3563 		  if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3564 		       || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3565 		    {
3566 		      newi2pat = gen_rtx_SET (VOIDmode, newdest,
3567 					      XEXP (src_op0, 0));
3568 		      SUBST (XEXP (setsrc, 0), newdest);
3569 		      SUBST (XEXP (setsrc, 1), newdest);
3570 		      subst_done = true;
3571 		    }
3572 		  /* Split "((X op X) op Y) op Y" as "T op T" where
3573 		     T is "X op Y".  */
3574 		  else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3575 		    {
3576 		      rtx tmp = simplify_gen_binary (code, mode, p, r);
3577 		      newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
3578 		      SUBST (XEXP (setsrc, 0), newdest);
3579 		      SUBST (XEXP (setsrc, 1), newdest);
3580 		      subst_done = true;
3581 		    }
3582 		}
3583 	    }
3584 
3585 	  if (!subst_done)
3586 	    {
3587 	      newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
3588 	      SUBST (*split, newdest);
3589 	    }
3590 
3591 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3592 
3593 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
3594 	     Make sure NEWPAT does not depend on the clobbered regs.  */
3595 	  if (GET_CODE (newi2pat) == PARALLEL)
3596 	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3597 	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3598 		{
3599 		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3600 		  if (reg_overlap_mentioned_p (reg, newpat))
3601 		    {
3602 		      undo_all ();
3603 		      return 0;
3604 		    }
3605 		}
3606 
3607 	  /* If the split point was a MULT and we didn't have one before,
3608 	     don't use one now.  */
3609 	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3610 	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3611 	}
3612     }
3613 
3614   /* Check for a case where we loaded from memory in a narrow mode and
3615      then sign extended it, but we need both registers.  In that case,
3616      we have a PARALLEL with both loads from the same memory location.
3617      We can split this into a load from memory followed by a register-register
3618      copy.  This saves at least one insn, more if register allocation can
3619      eliminate the copy.
3620 
3621      We cannot do this if the destination of the first assignment is a
3622      condition code register or cc0.  We eliminate this case by making sure
3623      the SET_DEST and SET_SRC have the same mode.
3624 
3625      We cannot do this if the destination of the second assignment is
3626      a register that we have already assumed is zero-extended.  Similarly
3627      for a SUBREG of such a register.  */
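
       /* A sketch of the case handled here, with hypothetical pseudos
	  (ADDR stands for the common memory address):

	      (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI ADDR)))
			 (set (reg:HI 101) (mem:HI ADDR))])

	  becomes I2 = (set (reg:SI 100) (sign_extend:SI (mem:HI ADDR)))
	  followed by I3 copying the lowpart of (reg:SI 100) into
	  (reg:HI 101).  */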
3628 
3629   else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3630 	   && GET_CODE (newpat) == PARALLEL
3631 	   && XVECLEN (newpat, 0) == 2
3632 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3633 	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3634 	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3635 	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3636 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3637 	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3638 			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3639 	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3640 				   DF_INSN_LUID (i2))
3641 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3642 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3643 	   && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
3644 		 (REG_P (temp)
3645 		  && reg_stat[REGNO (temp)].nonzero_bits != 0
3646 		  && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
3647 		  && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
3648 		  && (reg_stat[REGNO (temp)].nonzero_bits
3649 		      != GET_MODE_MASK (word_mode))))
3650 	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3651 		 && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3652 		     (REG_P (temp)
3653 		      && reg_stat[REGNO (temp)].nonzero_bits != 0
3654 		      && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
3655 		      && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
3656 		      && (reg_stat[REGNO (temp)].nonzero_bits
3657 			  != GET_MODE_MASK (word_mode)))))
3658 	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3659 					 SET_SRC (XVECEXP (newpat, 0, 1)))
3660 	   && ! find_reg_note (i3, REG_UNUSED,
3661 			       SET_DEST (XVECEXP (newpat, 0, 0))))
3662     {
3663       rtx ni2dest;
3664 
3665       newi2pat = XVECEXP (newpat, 0, 0);
3666       ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3667       newpat = XVECEXP (newpat, 0, 1);
3668       SUBST (SET_SRC (newpat),
3669 	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3670       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3671 
3672       if (i2_code_number >= 0)
3673 	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3674 
3675       if (insn_code_number >= 0)
3676 	swap_i2i3 = 1;
3677     }
3678 
3679   /* Similarly, check for a case where we have a PARALLEL of two independent
3680      SETs but we started with three insns.  In this case, we can do the sets
3681      as two separate insns.  This case occurs when some SET allows two
3682      other insns to combine, but the destination of that SET is still live.  */
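
       /* A sketch with hypothetical pseudos: a newpat of

	      (parallel [(set (reg:SI 100) (plus:SI (reg:SI 1) (reg:SI 2)))
			 (set (reg:SI 101) (minus:SI (reg:SI 3) (reg:SI 4)))])

	  can be emitted as two separate insns, one SET becoming the new
	  I2 and the other remaining in I3.  */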
3683 
3684   else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3685 	   && GET_CODE (newpat) == PARALLEL
3686 	   && XVECLEN (newpat, 0) == 2
3687 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3688 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3689 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3690 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3691 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3692 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3693 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3694 				  XVECEXP (newpat, 0, 0))
3695 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3696 				  XVECEXP (newpat, 0, 1))
3697 	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3698 		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3699     {
3700       /* Normally, it doesn't matter which of the two is done first,
3701 	 but the one that references cc0 can't be the second, and
3702 	 one which uses any regs/memory set in between i2 and i3 can't
3703 	 be first.  */
3704       if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3705 			      DF_INSN_LUID (i2))
3706 #ifdef HAVE_cc0
3707 	  && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
3708 #endif
3709 	 )
3710 	{
3711 	  newi2pat = XVECEXP (newpat, 0, 1);
3712 	  newpat = XVECEXP (newpat, 0, 0);
3713 	}
3714       else if (!use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 0)),
3715 				   DF_INSN_LUID (i2))
3716 #ifdef HAVE_cc0
3717 	       && !reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1))
3718 #endif
3719 	      )
3720 	{
3721 	  newi2pat = XVECEXP (newpat, 0, 0);
3722 	  newpat = XVECEXP (newpat, 0, 1);
3723 	}
3724       else
3725 	{
3726 	  undo_all ();
3727 	  return 0;
3728 	}
3729 
3730       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3731 
3732       if (i2_code_number >= 0)
3733 	{
3734 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
3735 	     Make sure NEWPAT does not depend on the clobbered regs.  */
3736 	  if (GET_CODE (newi2pat) == PARALLEL)
3737 	    {
3738 	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3739 		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3740 		  {
3741 		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3742 		    if (reg_overlap_mentioned_p (reg, newpat))
3743 		      {
3744 			undo_all ();
3745 			return 0;
3746 		      }
3747 		  }
3748 	    }
3749 
3750 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3751 	}
3752     }
3753 
3754   /* If it still isn't recognized, fail and change things back the way they
3755      were.  */
3756   if ((insn_code_number < 0
3757        /* Is the result a reasonable ASM_OPERANDS?  */
3758        && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3759     {
3760       undo_all ();
3761       return 0;
3762     }
3763 
3764   /* If we had to change another insn, make sure it is valid also.  */
3765   if (undobuf.other_insn)
3766     {
3767       CLEAR_HARD_REG_SET (newpat_used_regs);
3768 
3769       other_pat = PATTERN (undobuf.other_insn);
3770       other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
3771 					     &new_other_notes);
3772 
3773       if (other_code_number < 0 && ! check_asm_operands (other_pat))
3774 	{
3775 	  undo_all ();
3776 	  return 0;
3777 	}
3778     }
3779 
3780 #ifdef HAVE_cc0
3781   /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
3782      they are adjacent to each other or not.  */
3783   {
3784     rtx p = prev_nonnote_insn (i3);
3785     if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
3786 	&& sets_cc0_p (newi2pat))
3787       {
3788 	undo_all ();
3789 	return 0;
3790       }
3791   }
3792 #endif
3793 
3794   /* Only allow this combination if insn_rtx_costs reports that the
3795      replacement instructions are cheaper than the originals.  */
3796   if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
3797     {
3798       undo_all ();
3799       return 0;
3800     }
3801 
3802   if (MAY_HAVE_DEBUG_INSNS)
3803     {
3804       struct undo *undo;
3805 
3806       for (undo = undobuf.undos; undo; undo = undo->next)
3807 	if (undo->kind == UNDO_MODE)
3808 	  {
3809 	    rtx reg = *undo->where.r;
3810 	    enum machine_mode new_mode = GET_MODE (reg);
3811 	    enum machine_mode old_mode = undo->old_contents.m;
3812 
3813 	    /* Temporarily revert mode back.  */
3814 	    adjust_reg_mode (reg, old_mode);
3815 
3816 	    if (reg == i2dest && i2scratch)
3817 	      {
3818 		/* If we used i2dest as a scratch register with a
3819 		   different mode, substitute it for the original
3820 		   i2src while its original mode is temporarily
3821 		   restored, and then clear i2scratch so that we don't
3822 		   do it again later.  */
3823 		propagate_for_debug (i2, last_combined_insn, reg, i2src,
3824 				     this_basic_block);
3825 		i2scratch = false;
3826 		/* Put back the new mode.  */
3827 		adjust_reg_mode (reg, new_mode);
3828 	      }
3829 	    else
3830 	      {
3831 		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
3832 		rtx first, last;
3833 
3834 		if (reg == i2dest)
3835 		  {
3836 		    first = i2;
3837 		    last = last_combined_insn;
3838 		  }
3839 		else
3840 		  {
3841 		    first = i3;
3842 		    last = undobuf.other_insn;
3843 		    gcc_assert (last);
3844 		    if (DF_INSN_LUID (last)
3845 			< DF_INSN_LUID (last_combined_insn))
3846 		      last = last_combined_insn;
3847 		  }
3848 
3849 		/* We're dealing with a reg that changed mode but not
3850 		   meaning, so we want to turn it into a subreg for
3851 		   the new mode.  However, because of REG sharing and
3852 		   because its mode had already changed, we have to do
3853 		   it in two steps.  First, replace any debug uses of
3854 		   reg, with its original mode temporarily restored,
3855 		   with this copy we have created; then, replace the
3856 		   copy with the SUBREG of the original shared reg,
3857 		   once again changed to the new mode.  */
3858 		propagate_for_debug (first, last, reg, tempreg,
3859 				     this_basic_block);
3860 		adjust_reg_mode (reg, new_mode);
3861 		propagate_for_debug (first, last, tempreg,
3862 				     lowpart_subreg (old_mode, reg, new_mode),
3863 				     this_basic_block);
3864 	      }
3865 	  }
3866     }
3867 
3868   /* If we will be able to accept this, we have made a
3869      change to the destination of I3.  This requires us to
3870      do a few adjustments.  */
3871 
3872   if (changed_i3_dest)
3873     {
3874       PATTERN (i3) = newpat;
3875       adjust_for_new_dest (i3);
3876     }
3877 
3878   /* We now know that we can do this combination.  Merge the insns and
3879      update the status of registers and LOG_LINKS.  */
3880 
3881   if (undobuf.other_insn)
3882     {
3883       rtx note, next;
3884 
3885       PATTERN (undobuf.other_insn) = other_pat;
3886 
3887       /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
3888 	 ensure that they are still valid.  Then add any non-duplicate
3889 	 notes added by recog_for_combine.  */
3890       for (note = REG_NOTES (undobuf.other_insn); note; note = next)
3891 	{
3892 	  next = XEXP (note, 1);
3893 
3894 	  if ((REG_NOTE_KIND (note) == REG_DEAD
3895 	       && !reg_referenced_p (XEXP (note, 0),
3896 				     PATTERN (undobuf.other_insn)))
3897 	      || (REG_NOTE_KIND (note) == REG_UNUSED
3898 		 && !reg_set_p (XEXP (note, 0),
3899 				PATTERN (undobuf.other_insn))))
3900 	    remove_note (undobuf.other_insn, note);
3901 	}
3902 
3903       distribute_notes (new_other_notes, undobuf.other_insn,
3904 			undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX,
3905 			NULL_RTX);
3906     }
3907 
3908   if (swap_i2i3)
3909     {
3910       rtx insn;
3911       struct insn_link *link;
3912       rtx ni2dest;
3913 
3914       /* I3 now uses what used to be its destination and which is now
3915 	 I2's destination.  This requires us to do a few adjustments.  */
3916       PATTERN (i3) = newpat;
3917       adjust_for_new_dest (i3);
3918 
3919       /* We need a LOG_LINK from I3 to I2.  But we used to have one,
3920 	 so we still will.
3921 
3922 	 However, some later insn might be using I2's dest and have
3923 	 a LOG_LINK pointing at I3.  We must remove this link.
3924 	 The simplest way to remove the link is to point it at I1,
3925 	 which we know will be a NOTE.  */
3926 
3927       /* newi2pat is usually a SET here; however, recog_for_combine might
3928 	 have added some clobbers.  */
3929       if (GET_CODE (newi2pat) == PARALLEL)
3930 	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
3931       else
3932 	ni2dest = SET_DEST (newi2pat);
3933 
3934       for (insn = NEXT_INSN (i3);
3935 	   insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
3936 		    || insn != BB_HEAD (this_basic_block->next_bb));
3937 	   insn = NEXT_INSN (insn))
3938 	{
3939 	  if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
3940 	    {
3941 	      FOR_EACH_LOG_LINK (link, insn)
3942 		if (link->insn == i3)
3943 		  link->insn = i1;
3944 
3945 	      break;
3946 	    }
3947 	}
3948     }
3949 
3950   {
3951     rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
3952     struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
3953     rtx midnotes = 0;
3954     int from_luid;
3955     /* Compute which registers we expect to eliminate.  newi2pat may be setting
3956        either i3dest or i2dest, so we must check it.  Also, i1dest may be the
3957        same as i3dest, in which case newi2pat may be setting i1dest.  */
3958     rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
3959 		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
3960 		   || !i2dest_killed
3961 		   ? 0 : i2dest);
3962     rtx elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
3963 		   || (newi2pat && reg_set_p (i1dest, newi2pat))
3964 		   || !i1dest_killed
3965 		   ? 0 : i1dest);
3966     rtx elim_i0 = (i0 == 0 || i0dest_in_i0src
3967 		   || (newi2pat && reg_set_p (i0dest, newi2pat))
3968 		   || !i0dest_killed
3969 		   ? 0 : i0dest);
3970 
3971     /* Get the old REG_NOTES and LOG_LINKS from all our insns and
3972        clear them.  */
3973     i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
3974     i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
3975     if (i1)
3976       i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
3977     if (i0)
3978       i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
3979 
3980     /* Ensure that we do not have something that should not be shared but
3981        occurs multiple times in the new insns.  Check this by first
3982        resetting all the `used' flags and then copying anything that is shared.  */
3983 
3984     reset_used_flags (i3notes);
3985     reset_used_flags (i2notes);
3986     reset_used_flags (i1notes);
3987     reset_used_flags (i0notes);
3988     reset_used_flags (newpat);
3989     reset_used_flags (newi2pat);
3990     if (undobuf.other_insn)
3991       reset_used_flags (PATTERN (undobuf.other_insn));
3992 
3993     i3notes = copy_rtx_if_shared (i3notes);
3994     i2notes = copy_rtx_if_shared (i2notes);
3995     i1notes = copy_rtx_if_shared (i1notes);
3996     i0notes = copy_rtx_if_shared (i0notes);
3997     newpat = copy_rtx_if_shared (newpat);
3998     newi2pat = copy_rtx_if_shared (newi2pat);
3999     if (undobuf.other_insn)
4000       reset_used_flags (PATTERN (undobuf.other_insn));
4001 
4002     INSN_CODE (i3) = insn_code_number;
4003     PATTERN (i3) = newpat;
4004 
4005     if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4006       {
4007 	rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4008 
4009 	reset_used_flags (call_usage);
4010 	call_usage = copy_rtx (call_usage);
4011 
4012 	if (substed_i2)
4013 	  {
4014 	    /* I2SRC must still be meaningful at this point.  Some splitting
4015 	       operations can invalidate I2SRC, but those operations do not
4016 	       apply to calls.  */
4017 	    gcc_assert (i2src);
4018 	    replace_rtx (call_usage, i2dest, i2src);
4019 	  }
4020 
4021 	if (substed_i1)
4022 	  replace_rtx (call_usage, i1dest, i1src);
4023 	if (substed_i0)
4024 	  replace_rtx (call_usage, i0dest, i0src);
4025 
4026 	CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4027       }
4028 
4029     if (undobuf.other_insn)
4030       INSN_CODE (undobuf.other_insn) = other_code_number;
4031 
4032     /* We had one special case above where I2 had more than one set and
4033        we replaced a destination of one of those sets with the destination
4034        of I3.  In that case, we have to update LOG_LINKS of insns later
4035        in this basic block.  Note that this (expensive) case is rare.
4036 
4037        Also, in this case, we must pretend that all REG_NOTEs for I2
4038        actually came from I3, so that REG_UNUSED notes from I2 will be
4039        properly handled.  */
4040 
4041     if (i3_subst_into_i2)
4042       {
4043 	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4044 	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4045 	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4046 	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4047 	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4048 	      && ! find_reg_note (i2, REG_UNUSED,
4049 				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4050 	    for (temp = NEXT_INSN (i2);
4051 		 temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
4052 			  || BB_HEAD (this_basic_block) != temp);
4053 		 temp = NEXT_INSN (temp))
4054 	      if (temp != i3 && INSN_P (temp))
4055 		FOR_EACH_LOG_LINK (link, temp)
4056 		  if (link->insn == i2)
4057 		    link->insn = i3;
4058 
4059 	if (i3notes)
4060 	  {
4061 	    rtx link = i3notes;
4062 	    while (XEXP (link, 1))
4063 	      link = XEXP (link, 1);
4064 	    XEXP (link, 1) = i2notes;
4065 	  }
4066 	else
4067 	  i3notes = i2notes;
4068 	i2notes = 0;
4069       }
4070 
4071     LOG_LINKS (i3) = NULL;
4072     REG_NOTES (i3) = 0;
4073     LOG_LINKS (i2) = NULL;
4074     REG_NOTES (i2) = 0;
4075 
4076     if (newi2pat)
4077       {
4078 	if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4079 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4080 			       this_basic_block);
4081 	INSN_CODE (i2) = i2_code_number;
4082 	PATTERN (i2) = newi2pat;
4083       }
4084     else
4085       {
4086 	if (MAY_HAVE_DEBUG_INSNS && i2src)
4087 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4088 			       this_basic_block);
4089 	SET_INSN_DELETED (i2);
4090       }
4091 
4092     if (i1)
4093       {
4094 	LOG_LINKS (i1) = NULL;
4095 	REG_NOTES (i1) = 0;
4096 	if (MAY_HAVE_DEBUG_INSNS)
4097 	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4098 			       this_basic_block);
4099 	SET_INSN_DELETED (i1);
4100       }
4101 
4102     if (i0)
4103       {
4104 	LOG_LINKS (i0) = NULL;
4105 	REG_NOTES (i0) = 0;
4106 	if (MAY_HAVE_DEBUG_INSNS)
4107 	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4108 			       this_basic_block);
4109 	SET_INSN_DELETED (i0);
4110       }
4111 
4112     /* Get death notes for everything that is now used in either I3 or
4113        I2 and used to die in a previous insn.  If we built two new
4114        patterns, move from I1 to I2 then I2 to I3 so that we get the
4115        proper movement on registers that I2 modifies.  */
4116 
4117     if (i0)
4118       from_luid = DF_INSN_LUID (i0);
4119     else if (i1)
4120       from_luid = DF_INSN_LUID (i1);
4121     else
4122       from_luid = DF_INSN_LUID (i2);
4123     if (newi2pat)
4124       move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4125     move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4126 
4127     /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
4128     if (i3notes)
4129       distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
4130 			elim_i2, elim_i1, elim_i0);
4131     if (i2notes)
4132       distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
4133 			elim_i2, elim_i1, elim_i0);
4134     if (i1notes)
4135       distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
4136 			elim_i2, elim_i1, elim_i0);
4137     if (i0notes)
4138       distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL_RTX,
4139 			elim_i2, elim_i1, elim_i0);
4140     if (midnotes)
4141       distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4142 			elim_i2, elim_i1, elim_i0);
4143 
4144     /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
4145        know these are REG_UNUSED and want them to go to the desired insn,
4146        so we always pass it as i3.  */
4147 
4148     if (newi2pat && new_i2_notes)
4149       distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX,
4150 			NULL_RTX);
4151 
4152     if (new_i3_notes)
4153       distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX,
4154 			NULL_RTX);
4155 
4156     /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
4157        put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
4158        I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
4159        in that case, it might delete I2.  Similarly for I2 and I1.
4160        Show an additional death due to the REG_DEAD note we make here.  If
4161        we discard it in distribute_notes, we will decrement it again.  */
4162 
4163     if (i3dest_killed)
4164       {
4165 	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4166 	  distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4167 					    NULL_RTX),
4168 			    NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1, elim_i0);
4169 	else
4170 	  distribute_notes (alloc_reg_note (REG_DEAD, i3dest_killed,
4171 					    NULL_RTX),
4172 			    NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4173 			    elim_i2, elim_i1, elim_i0);
4174       }
4175 
4176     if (i2dest_in_i2src)
4177       {
4178 	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4179 	if (newi2pat && reg_set_p (i2dest, newi2pat))
4180 	  distribute_notes (new_note,  NULL_RTX, i2, NULL_RTX, NULL_RTX,
4181 			    NULL_RTX, NULL_RTX);
4182 	else
4183 	  distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4184 			    NULL_RTX, NULL_RTX, NULL_RTX);
4185       }
4186 
4187     if (i1dest_in_i1src)
4188       {
4189 	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4190 	if (newi2pat && reg_set_p (i1dest, newi2pat))
4191 	  distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4192 			    NULL_RTX, NULL_RTX);
4193 	else
4194 	  distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4195 			    NULL_RTX, NULL_RTX, NULL_RTX);
4196       }
4197 
4198     if (i0dest_in_i0src)
4199       {
4200 	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4201 	if (newi2pat && reg_set_p (i0dest, newi2pat))
4202 	  distribute_notes (new_note, NULL_RTX, i2, NULL_RTX, NULL_RTX,
4203 			    NULL_RTX, NULL_RTX);
4204 	else
4205 	  distribute_notes (new_note, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
4206 			    NULL_RTX, NULL_RTX, NULL_RTX);
4207       }
4208 
4209     distribute_links (i3links);
4210     distribute_links (i2links);
4211     distribute_links (i1links);
4212     distribute_links (i0links);
4213 
4214     if (REG_P (i2dest))
4215       {
4216 	struct insn_link *link;
4217 	rtx i2_insn = 0, i2_val = 0, set;
4218 
4219 	/* The insn that used to set this register doesn't exist, and
4220 	   this life of the register may not exist either.  See if one of
4221 	   I3's links points to an insn that sets I2DEST.  If it does,
4222 	   that is now the last known value for I2DEST. If we don't update
4223 	   this and I2 set the register to a value that depended on its old
4224 	   contents, we will get confused.  If this insn is used, things
4225 	   will be set correctly in combine_instructions.  */
4226 	FOR_EACH_LOG_LINK (link, i3)
4227 	  if ((set = single_set (link->insn)) != 0
4228 	      && rtx_equal_p (i2dest, SET_DEST (set)))
4229 	    i2_insn = link->insn, i2_val = SET_SRC (set);
4230 
4231 	record_value_for_reg (i2dest, i2_insn, i2_val);
4232 
4233 	/* If the reg formerly set in I2 died only once and that was in I3,
4234 	   zero its use count so it won't make `reload' do any work.  */
4235 	if (! added_sets_2
4236 	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4237 	    && ! i2dest_in_i2src)
4238 	  INC_REG_N_SETS (REGNO (i2dest), -1);
4239       }
4240 
4241     if (i1 && REG_P (i1dest))
4242       {
4243 	struct insn_link *link;
4244 	rtx i1_insn = 0, i1_val = 0, set;
4245 
4246 	FOR_EACH_LOG_LINK (link, i3)
4247 	  if ((set = single_set (link->insn)) != 0
4248 	      && rtx_equal_p (i1dest, SET_DEST (set)))
4249 	    i1_insn = link->insn, i1_val = SET_SRC (set);
4250 
4251 	record_value_for_reg (i1dest, i1_insn, i1_val);
4252 
4253 	if (! added_sets_1 && ! i1dest_in_i1src)
4254 	  INC_REG_N_SETS (REGNO (i1dest), -1);
4255       }
4256 
4257     if (i0 && REG_P (i0dest))
4258       {
4259 	struct insn_link *link;
4260 	rtx i0_insn = 0, i0_val = 0, set;
4261 
4262 	FOR_EACH_LOG_LINK (link, i3)
4263 	  if ((set = single_set (link->insn)) != 0
4264 	      && rtx_equal_p (i0dest, SET_DEST (set)))
4265 	    i0_insn = link->insn, i0_val = SET_SRC (set);
4266 
4267 	record_value_for_reg (i0dest, i0_insn, i0_val);
4268 
4269 	if (! added_sets_0 && ! i0dest_in_i0src)
4270 	  INC_REG_N_SETS (REGNO (i0dest), -1);
4271       }
4272 
4273     /* Update reg_stat[].nonzero_bits et al for any changes that may have
4274        been made to this insn.  The order of the
4275        set_nonzero_bits_and_sign_copies() calls below is important,
4276        because newi2pat can affect the nonzero_bits of newpat.  */
4277     if (newi2pat)
4278       note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4279     note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4280   }
4281 
4282   if (undobuf.other_insn != NULL_RTX)
4283     {
4284       if (dump_file)
4285 	{
4286 	  fprintf (dump_file, "modifying other_insn ");
4287 	  dump_insn_slim (dump_file, undobuf.other_insn);
4288 	}
4289       df_insn_rescan (undobuf.other_insn);
4290     }
4291 
4292   if (i0 && !(NOTE_P(i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4293     {
4294       if (dump_file)
4295 	{
4296 	  fprintf (dump_file, "modifying insn i0 ");
4297 	  dump_insn_slim (dump_file, i0);
4298 	}
4299       df_insn_rescan (i0);
4300     }
4301 
4302   if (i1 && !(NOTE_P(i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4303     {
4304       if (dump_file)
4305 	{
4306 	  fprintf (dump_file, "modifying insn i1 ");
4307 	  dump_insn_slim (dump_file, i1);
4308 	}
4309       df_insn_rescan (i1);
4310     }
4311 
4312   if (i2 && !(NOTE_P(i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4313     {
4314       if (dump_file)
4315 	{
4316 	  fprintf (dump_file, "modifying insn i2 ");
4317 	  dump_insn_slim (dump_file, i2);
4318 	}
4319       df_insn_rescan (i2);
4320     }
4321 
4322   if (i3 && !(NOTE_P(i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4323     {
4324       if (dump_file)
4325 	{
4326 	  fprintf (dump_file, "modifying insn i3 ");
4327 	  dump_insn_slim (dump_file, i3);
4328 	}
4329       df_insn_rescan (i3);
4330     }
4331 
4332   /* Set new_direct_jump_p if a new return or simple jump instruction
4333      has been created.  Adjust the CFG accordingly.  */
4334 
4335   if (returnjump_p (i3) || any_uncondjump_p (i3))
4336     {
4337       *new_direct_jump_p = 1;
4338       mark_jump_label (PATTERN (i3), i3, 0);
4339       update_cfg_for_uncondjump (i3);
4340     }
4341 
4342   if (undobuf.other_insn != NULL_RTX
4343       && (returnjump_p (undobuf.other_insn)
4344 	  || any_uncondjump_p (undobuf.other_insn)))
4345     {
4346       *new_direct_jump_p = 1;
4347       update_cfg_for_uncondjump (undobuf.other_insn);
4348     }
4349 
4350   /* A noop might also need cleaning up of CFG, if it comes from the
4351      simplification of a jump.  */
4352   if (JUMP_P (i3)
4353       && GET_CODE (newpat) == SET
4354       && SET_SRC (newpat) == pc_rtx
4355       && SET_DEST (newpat) == pc_rtx)
4356     {
4357       *new_direct_jump_p = 1;
4358       update_cfg_for_uncondjump (i3);
4359     }
4360 
4361   if (undobuf.other_insn != NULL_RTX
4362       && JUMP_P (undobuf.other_insn)
4363       && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4364       && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4365       && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4366     {
4367       *new_direct_jump_p = 1;
4368       update_cfg_for_uncondjump (undobuf.other_insn);
4369     }
4370 
4371   combine_successes++;
4372   undo_commit ();
4373 
4374   if (added_links_insn
4375       && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4376       && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4377     return added_links_insn;
4378   else
4379     return newi2pat ? i2 : i3;
4380 }
4381 
4382 /* Undo all the modifications recorded in undobuf.  */
4383 
4384 static void
4385 undo_all (void)
4386 {
4387   struct undo *undo, *next;
4388 
4389   for (undo = undobuf.undos; undo; undo = next)
4390     {
4391       next = undo->next;
4392       switch (undo->kind)
4393 	{
4394 	case UNDO_RTX:
4395 	  *undo->where.r = undo->old_contents.r;
4396 	  break;
4397 	case UNDO_INT:
4398 	  *undo->where.i = undo->old_contents.i;
4399 	  break;
4400 	case UNDO_MODE:
4401 	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4402 	  break;
4403 	case UNDO_LINKS:
4404 	  *undo->where.l = undo->old_contents.l;
4405 	  break;
4406 	default:
4407 	  gcc_unreachable ();
4408 	}
4409 
4410       undo->next = undobuf.frees;
4411       undobuf.frees = undo;
4412     }
4413 
4414   undobuf.undos = 0;
4415 }
4416 
4417 /* We've committed to accepting the changes we made.  Move all
4418    of the undos to the free list.  */
4419 
4420 static void
4421 undo_commit (void)
4422 {
4423   struct undo *undo, *next;
4424 
4425   for (undo = undobuf.undos; undo; undo = next)
4426     {
4427       next = undo->next;
4428       undo->next = undobuf.frees;
4429       undobuf.frees = undo;
4430     }
4431   undobuf.undos = 0;
4432 }
4433 
4434 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4435    where we have an arithmetic expression and return that point.  LOC will
4436    be inside INSN.
4437 
4438    try_combine will call this function to see if an insn can be split into
4439    two insns.  */
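
     /* For example (illustrative only), if an unrecognized SET has the
	source (plus:SI (mult:SI (reg:SI 1) (reg:SI 2)) (reg:SI 3)), the
	MULT subexpression is a plausible split point: the caller can
	compute it into a scratch register in a new I2 and leave a simple
	PLUS in I3.  */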
4440 
4441 static rtx *
4442 find_split_point (rtx *loc, rtx insn, bool set_src)
4443 {
4444   rtx x = *loc;
4445   enum rtx_code code = GET_CODE (x);
4446   rtx *split;
4447   unsigned HOST_WIDE_INT len = 0;
4448   HOST_WIDE_INT pos = 0;
4449   int unsignedp = 0;
4450   rtx inner = NULL_RTX;
4451 
4452   /* First special-case some codes.  */
4453   switch (code)
4454     {
4455     case SUBREG:
4456 #ifdef INSN_SCHEDULING
4457       /* If we are making a paradoxical SUBREG invalid, it becomes a split
4458 	 point.  */
4459       if (MEM_P (SUBREG_REG (x)))
4460 	return loc;
4461 #endif
4462       return find_split_point (&SUBREG_REG (x), insn, false);
4463 
4464     case MEM:
4465 #ifdef HAVE_lo_sum
4466       /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4467 	 using LO_SUM and HIGH.  */
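	   /* E.g. (mem (symbol_ref "x")) has its address rewritten as
	      (lo_sum (high (symbol_ref "x")) (symbol_ref "x")), and the
	      HIGH subexpression becomes the split point.  */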
4468       if (GET_CODE (XEXP (x, 0)) == CONST
4469 	  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
4470 	{
4471 	  enum machine_mode address_mode = get_address_mode (x);
4472 
4473 	  SUBST (XEXP (x, 0),
4474 		 gen_rtx_LO_SUM (address_mode,
4475 				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4476 				 XEXP (x, 0)));
4477 	  return &XEXP (XEXP (x, 0), 0);
4478 	}
4479 #endif
4480 
4481       /* If we have a PLUS whose second operand is a constant and the
4482 	 address is not valid, perhaps we can split it up using
4483 	 the machine-specific way to split large constants.  We use
4484 	 the first pseudo-reg (one of the virtual regs) as a placeholder;
4485 	 it will not remain in the result.  */
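	   /* For instance (an illustrative sketch), on a RISC target an
	      invalid address such as
	      (plus:SI (reg:SI 1) (const_int 0x12345)) might be split by
	      the MD file into a high-part load of the constant into the
	      placeholder followed by an add of the low part.  */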
4486       if (GET_CODE (XEXP (x, 0)) == PLUS
4487 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4488 	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4489 					    MEM_ADDR_SPACE (x)))
4490 	{
4491 	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4492 	  rtx seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
4493 						      XEXP (x, 0)),
4494 					 subst_insn);
4495 
4496 	  /* This should have produced two insns, each of which sets our
4497 	     placeholder.  If the source of the second is a valid address,
4498 	     we can put both sources together and make a split point
4499 	     in the middle.  */
4500 
4501 	  if (seq
4502 	      && NEXT_INSN (seq) != NULL_RTX
4503 	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4504 	      && NONJUMP_INSN_P (seq)
4505 	      && GET_CODE (PATTERN (seq)) == SET
4506 	      && SET_DEST (PATTERN (seq)) == reg
4507 	      && ! reg_mentioned_p (reg,
4508 				    SET_SRC (PATTERN (seq)))
4509 	      && NONJUMP_INSN_P (NEXT_INSN (seq))
4510 	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4511 	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4512 	      && memory_address_addr_space_p
4513 		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4514 		    MEM_ADDR_SPACE (x)))
4515 	    {
4516 	      rtx src1 = SET_SRC (PATTERN (seq));
4517 	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4518 
4519 	      /* Replace the placeholder in SRC2 with SRC1.  If we can
4520 		 find where in SRC2 it was placed, that can become our
4521 		 split point and we can replace this address with SRC2.
4522 		 Just try two obvious places.  */
4523 
4524 	      src2 = replace_rtx (src2, reg, src1);
4525 	      split = 0;
4526 	      if (XEXP (src2, 0) == src1)
4527 		split = &XEXP (src2, 0);
4528 	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4529 		       && XEXP (XEXP (src2, 0), 0) == src1)
4530 		split = &XEXP (XEXP (src2, 0), 0);
4531 
4532 	      if (split)
4533 		{
4534 		  SUBST (XEXP (x, 0), src2);
4535 		  return split;
4536 		}
4537 	    }
4538 
4539 	  /* If that didn't work, perhaps the first operand is complex and
4540 	     needs to be computed separately, so make a split point there.
4541 	     This will occur on machines that just support REG + CONST
4542 	     and have a constant moved through some previous computation.  */
4543 
4544 	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4545 		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4546 			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4547 	    return &XEXP (XEXP (x, 0), 0);
4548 	}
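
	  /* Hypothetical illustration: if the backend splits
		 (set (reg P) (plus (reg A) (const_int 0x12345)))
	     into
		 (set (reg P) (plus (reg A) (const_int 0x12000)))
		 (set (reg P) (plus (reg P) (const_int 0x345)))
	     then SRC2 becomes
		 (plus (plus (reg A) (const_int 0x12000)) (const_int 0x345))
	     after the placeholder is replaced, the address is replaced
	     by SRC2, and the inner PLUS is the returned split point.  */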
4549 
4550       /* If we have a PLUS whose first operand is complex, try computing it
4551          separately by making a split there.  */
4552       if (GET_CODE (XEXP (x, 0)) == PLUS
4553           && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4554 					    MEM_ADDR_SPACE (x))
4555           && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4556           && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4557                 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4558         return &XEXP (XEXP (x, 0), 0);
4559       break;
4560 
4561     case SET:
4562 #ifdef HAVE_cc0
4563       /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4564 	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4565 	 we need to put the operand into a register.  So split at that
4566 	 point.  */
4567 
4568       if (SET_DEST (x) == cc0_rtx
4569 	  && GET_CODE (SET_SRC (x)) != COMPARE
4570 	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4571 	  && !OBJECT_P (SET_SRC (x))
4572 	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
4573 		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4574 	return &SET_SRC (x);
4575 #endif
4576 
4577       /* See if we can split SET_SRC as it stands.  */
4578       split = find_split_point (&SET_SRC (x), insn, true);
4579       if (split && split != &SET_SRC (x))
4580 	return split;
4581 
4582       /* See if we can split SET_DEST as it stands.  */
4583       split = find_split_point (&SET_DEST (x), insn, false);
4584       if (split && split != &SET_DEST (x))
4585 	return split;
4586 
4587       /* See if this is a bitfield assignment with everything constant.  If
4588 	 so, this is an IOR of an AND, so split it into that.  */
4589       if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4590 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4591 	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
4592 	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
4593 	  && CONST_INT_P (SET_SRC (x))
4594 	  && ((INTVAL (XEXP (SET_DEST (x), 1))
4595 	       + INTVAL (XEXP (SET_DEST (x), 2)))
4596 	      <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4597 	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4598 	{
4599 	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4600 	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4601 	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4602 	  rtx dest = XEXP (SET_DEST (x), 0);
4603 	  enum machine_mode mode = GET_MODE (dest);
4604 	  unsigned HOST_WIDE_INT mask
4605 	    = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4606 	  rtx or_mask;
4607 
4608 	  if (BITS_BIG_ENDIAN)
4609 	    pos = GET_MODE_PRECISION (mode) - len - pos;
4610 
4611 	  or_mask = gen_int_mode (src << pos, mode);
4612 	  if (src == mask)
4613 	    SUBST (SET_SRC (x),
4614 		   simplify_gen_binary (IOR, mode, dest, or_mask));
4615 	  else
4616 	    {
4617 	      rtx negmask = gen_int_mode (~(mask << pos), mode);
4618 	      SUBST (SET_SRC (x),
4619 		     simplify_gen_binary (IOR, mode,
4620 					  simplify_gen_binary (AND, mode,
4621 							       dest, negmask),
4622 					  or_mask));
4623 	    }
4624 
4625 	  SUBST (SET_DEST (x), dest);
4626 
4627 	  split = find_split_point (&SET_SRC (x), insn, true);
4628 	  if (split && split != &SET_SRC (x))
4629 	    return split;
4630 	}
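
      /* E.g. with BITS_BIG_ENDIAN zero,
	     (set (zero_extract (reg D) (const_int 8) (const_int 4))
		  (const_int 90))
	 becomes
	     (set (reg D) (ior (and (reg D) (const_int -4081))
			       (const_int 1440)))
	 since the field mask is 0xff at bit 4: ~(0xff << 4) is -4081
	 and 90 << 4 is 1440.  */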
4631 
4632       /* Otherwise, see if this is an operation that we can split into two.
4633 	 If so, try to split that.  */
4634       code = GET_CODE (SET_SRC (x));
4635 
4636       switch (code)
4637 	{
4638 	case AND:
4639 	  /* If we are AND'ing with a large constant that is only a single
4640 	     bit and the result is only being used in a context where we
4641 	     need to know if it is zero or nonzero, replace it with a bit
4642 	     extraction.  This will avoid the large constant, which might
4643 	     have taken more than one insn to make.  If the constant were
4644 	     not a valid argument to the AND but took only one insn to make,
4645 	     this is no worse, but if it took more than one insn, it will
4646 	     be better.  */
4647 
4648 	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4649 	      && REG_P (XEXP (SET_SRC (x), 0))
4650 	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4651 	      && REG_P (SET_DEST (x))
4652 	      && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0
4653 	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4654 	      && XEXP (*split, 0) == SET_DEST (x)
4655 	      && XEXP (*split, 1) == const0_rtx)
4656 	    {
4657 	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4658 						XEXP (SET_SRC (x), 0),
4659 						pos, NULL_RTX, 1, 1, 0, 0);
4660 	      if (extraction != 0)
4661 		{
4662 		  SUBST (SET_SRC (x), extraction);
4663 		  return find_split_point (loc, insn, false);
4664 		}
4665 	    }
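	  /* For instance, if (reg D) is set from (and (reg S)
	     (const_int 4096)) and is used only in (ne (reg D)
	     (const_int 0)), the source can be rewritten as a one-bit
	     extraction, e.g. (zero_extract (reg S) (const_int 1)
	     (const_int 12)), avoiding the literal 4096.  */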
4666 	  break;
4667 
4668 	case NE:
4669 	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4670 	     is known to be on, this can be converted into a NEG of a shift.  */
4671 	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4672 	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4673 	      && 1 <= (pos = exact_log2
4674 		       (nonzero_bits (XEXP (SET_SRC (x), 0),
4675 				      GET_MODE (XEXP (SET_SRC (x), 0))))))
4676 	    {
4677 	      enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4678 
4679 	      SUBST (SET_SRC (x),
4680 		     gen_rtx_NEG (mode,
4681 				  gen_rtx_LSHIFTRT (mode,
4682 						    XEXP (SET_SRC (x), 0),
4683 						    GEN_INT (pos))));
4684 
4685 	      split = find_split_point (&SET_SRC (x), insn, true);
4686 	      if (split && split != &SET_SRC (x))
4687 		return split;
4688 	    }
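	  /* E.g. with STORE_FLAG_VALUE of -1, if only bit 3 of X can
	     be nonzero, (ne X (const_int 0)) becomes
	     (neg (lshiftrt X (const_int 3))): the shift yields 0 or 1
	     and the NEG maps 1 to -1, the store-flag value.  */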
4689 	  break;
4690 
4691 	case SIGN_EXTEND:
4692 	  inner = XEXP (SET_SRC (x), 0);
4693 
4694 	  /* We can't optimize if either mode is a partial integer
4695 	     mode as we don't know how many bits are significant
4696 	     in those modes.  */
4697 	  if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4698 	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4699 	    break;
4700 
4701 	  pos = 0;
4702 	  len = GET_MODE_PRECISION (GET_MODE (inner));
4703 	  unsignedp = 0;
4704 	  break;
4705 
4706 	case SIGN_EXTRACT:
4707 	case ZERO_EXTRACT:
4708 	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4709 	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4710 	    {
4711 	      inner = XEXP (SET_SRC (x), 0);
4712 	      len = INTVAL (XEXP (SET_SRC (x), 1));
4713 	      pos = INTVAL (XEXP (SET_SRC (x), 2));
4714 
4715 	      if (BITS_BIG_ENDIAN)
4716 		pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
4717 	      unsignedp = (code == ZERO_EXTRACT);
4718 	    }
4719 	  break;
4720 
4721 	default:
4722 	  break;
4723 	}
4724 
4725       if (len && pos >= 0
4726 	  && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
4727 	{
4728 	  enum machine_mode mode = GET_MODE (SET_SRC (x));
4729 
4730 	  /* For unsigned, we have a choice of a shift followed by an
4731 	     AND or two shifts.  Use two shifts for field sizes where the
4732 	     constant might be too large.  We assume here that we can
4733 	     always at least get 8-bit constants in an AND insn, which is
4734 	     true for every current RISC.  */
4735 
4736 	  if (unsignedp && len <= 8)
4737 	    {
4738 	      SUBST (SET_SRC (x),
4739 		     gen_rtx_AND (mode,
4740 				  gen_rtx_LSHIFTRT
4741 				  (mode, gen_lowpart (mode, inner),
4742 				   GEN_INT (pos)),
4743 				  GEN_INT (((unsigned HOST_WIDE_INT) 1 << len)
4744 					   - 1)));
4745 
4746 	      split = find_split_point (&SET_SRC (x), insn, true);
4747 	      if (split && split != &SET_SRC (x))
4748 		return split;
4749 	    }
4750 	  else
4751 	    {
4752 	      SUBST (SET_SRC (x),
4753 		     gen_rtx_fmt_ee
4754 		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
4755 		      gen_rtx_ASHIFT (mode,
4756 				      gen_lowpart (mode, inner),
4757 				      GEN_INT (GET_MODE_PRECISION (mode)
4758 					       - len - pos)),
4759 		      GEN_INT (GET_MODE_PRECISION (mode) - len)));
4760 
4761 	      split = find_split_point (&SET_SRC (x), insn, true);
4762 	      if (split && split != &SET_SRC (x))
4763 		return split;
4764 	    }
4765 	}
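
      /* Example: extracting a signed 8-bit field at bit 8 of a 32-bit
	 value uses the two-shift form
	     (ashiftrt (ashift (reg X) (const_int 16)) (const_int 24))
	 i.e. shift left by 32 - 8 - 8, then arithmetically right by
	 32 - 8; the unsigned case with len <= 8 instead uses LSHIFTRT
	 followed by an AND with (const_int 255).  */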
4766 
4767       /* See if this is a simple operation with a constant as the second
4768 	 operand.  It might be that this constant is out of range and hence
4769 	 could be used as a split point.  */
4770       if (BINARY_P (SET_SRC (x))
4771 	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
4772 	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
4773 	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
4774 		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
4775 	return &XEXP (SET_SRC (x), 1);
4776 
4777       /* Finally, see if this is a simple operation with its first operand
4778 	 not in a register.  The operation might require this operand in a
4779 	 register, so return it as a split point.  We can always do this
4780 	 because if the first operand were another operation, we would have
4781 	 already found it as a split point.  */
4782       if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
4783 	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
4784 	return &XEXP (SET_SRC (x), 0);
4785 
4786       return 0;
4787 
4788     case AND:
4789     case IOR:
4790       /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
4791 	 it is better to write this as (not (ior A B)) so we can split it.
4792 	 Similarly for IOR.  */
4793       if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
4794 	{
4795 	  SUBST (*loc,
4796 		 gen_rtx_NOT (GET_MODE (x),
4797 			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
4798 					      GET_MODE (x),
4799 					      XEXP (XEXP (x, 0), 0),
4800 					      XEXP (XEXP (x, 1), 0))));
4801 	  return find_split_point (loc, insn, set_src);
4802 	}
4803 
4804       /* Many RISC machines have a large set of logical insns.  If the
4805 	 second operand is a NOT, put it first so we will try to split the
4806 	 other operand first.  */
4807       if (GET_CODE (XEXP (x, 1)) == NOT)
4808 	{
4809 	  rtx tem = XEXP (x, 0);
4810 	  SUBST (XEXP (x, 0), XEXP (x, 1));
4811 	  SUBST (XEXP (x, 1), tem);
4812 	}
4813       break;
4814 
4815     case PLUS:
4816     case MINUS:
4817       /* Canonicalization can produce (minus A (mult B C)), where C is a
4818 	 constant.  It may be better to try splitting (plus (mult B -C) A)
4819 	 instead if this isn't a multiply by a power of two.  */
4820       if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
4821 	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4822 	  && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
4823 	{
4824 	  enum machine_mode mode = GET_MODE (x);
4825 	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
4826 	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
4827 	  SUBST (*loc, gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
4828 							 XEXP (XEXP (x, 1), 0),
4829 							 GEN_INT (other_int)),
4830 				     XEXP (x, 0)));
4831 	  return find_split_point (loc, insn, set_src);
4832 	}
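
      /* E.g. (minus (reg A) (mult (reg B) (const_int 3))) becomes
	 (plus (mult (reg B) (const_int -3)) (reg A)), since 3 is not a
	 power of two; the rewritten form may match a multiply-add
	 pattern where the original would not.  */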
4833 
4834       /* Split at a multiply-accumulate instruction.  However if this is
4835          the SET_SRC, we likely do not have such an instruction and it's
4836          worthless to try this split.  */
4837       if (!set_src && GET_CODE (XEXP (x, 0)) == MULT)
4838         return loc;
4839 
4840     default:
4841       break;
4842     }
4843 
4844   /* Otherwise, select our actions depending on our rtx class.  */
4845   switch (GET_RTX_CLASS (code))
4846     {
4847     case RTX_BITFIELD_OPS:		/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
4848     case RTX_TERNARY:
4849       split = find_split_point (&XEXP (x, 2), insn, false);
4850       if (split)
4851 	return split;
4852       /* ... fall through ...  */
4853     case RTX_BIN_ARITH:
4854     case RTX_COMM_ARITH:
4855     case RTX_COMPARE:
4856     case RTX_COMM_COMPARE:
4857       split = find_split_point (&XEXP (x, 1), insn, false);
4858       if (split)
4859 	return split;
4860       /* ... fall through ...  */
4861     case RTX_UNARY:
4862       /* Some machines have (and (shift ...) ...) insns.  If X is not
4863 	 an AND, but XEXP (X, 0) is, use it as our split point.  */
4864       if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
4865 	return &XEXP (x, 0);
4866 
4867       split = find_split_point (&XEXP (x, 0), insn, false);
4868       if (split)
4869 	return split;
4870       return loc;
4871 
4872     default:
4873       /* Otherwise, we don't have a split point.  */
4874       return 0;
4875     }
4876 }
4877 
4878 /* Throughout X, replace FROM with TO, and return the result.
4879    The result is TO if X is FROM;
4880    otherwise the result is X, but its contents may have been modified.
4881    If they were modified, a record was made in undobuf so that
4882    undo_all will (among other things) return X to its original state.
4883 
4884    If the number of changes necessary is too large to record for undoing,
4885    the excess changes are not made, so the result is invalid.
4886    The changes already made can still be undone.
4887    undobuf.num_undo is incremented for such changes, so by testing that,
4888    the caller can tell whether the result is valid.
4889 
4890    `n_occurrences' is incremented each time FROM is replaced.
4891 
4892    IN_DEST is nonzero if we are processing the SET_DEST of a SET.
4893 
4894    IN_COND is nonzero if we are at the top level of a condition.
4895 
4896    UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
4897    by copying if `n_occurrences' is nonzero.  */
4898 
4899 static rtx
4900 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
4901 {
4902   enum rtx_code code = GET_CODE (x);
4903   enum machine_mode op0_mode = VOIDmode;
4904   const char *fmt;
4905   int len, i;
4906   rtx new_rtx;
4907 
4908 /* Two expressions are equal if they are identical copies of a shared
4909    RTX or if they are both registers with the same register number
4910    and mode.  */
4911 
4912 #define COMBINE_RTX_EQUAL_P(X,Y)			\
4913   ((X) == (Y)						\
4914    || (REG_P (X) && REG_P (Y)	\
4915        && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
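
/* For example, two distinct rtxes that both read (reg:SI 100) are
   "equal" for our purposes, while (reg:SI 100) and (reg:HI 100) are
   not, since their modes differ.  */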
4916 
4917   if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
4918     {
4919       n_occurrences++;
4920       return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
4921     }
4922 
4923   /* If X and FROM are the same register but different modes, they
4924      will not have been seen as equal above.  However, the log links code
4925      will make a LOG_LINKS entry for that case.  If we do nothing, we
4926      will try to rerecognize our original insn and, when it succeeds,
4927      we will delete the feeding insn, which is incorrect.
4928 
4929      So force this insn not to match in this (rare) case.  */
4930   if (! in_dest && code == REG && REG_P (from)
4931       && reg_overlap_mentioned_p (x, from))
4932     return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
4933 
4934   /* If this is an object, we are done unless it is a MEM or LO_SUM, both
4935      of which may contain things that can be combined.  */
4936   if (code != MEM && code != LO_SUM && OBJECT_P (x))
4937     return x;
4938 
4939   /* It is possible to have a subexpression appear twice in the insn.
4940      Suppose that FROM is a register that appears within TO.
4941      Then, after that subexpression has been scanned once by `subst',
4942      the second time it is scanned, TO may be found.  If we were
4943      to scan TO here, we would find FROM within it and create a
4944      self-referent rtl structure which is completely wrong.  */
4945   if (COMBINE_RTX_EQUAL_P (x, to))
4946     return to;
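
  /* Concretely, if FROM is (reg 100) and TO is (plus (reg 100)
     (const_int 4)), a later part of the walk may reach the TO we have
     already installed; returning it unchanged here keeps us from
     substituting inside it and building circular RTL.  */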
4947 
4948   /* Parallel asm_operands need special attention because all of the
4949      inputs are shared across the arms.  Furthermore, unsharing the
4950      rtl results in recognition failures.  Failure to handle this case
4951      specially can result in circular rtl.
4952 
4953      Solve this by doing a normal pass across the first entry of the
4954      parallel, and only processing the SET_DESTs of the subsequent
4955      entries.  Ug.  */
4956 
4957   if (code == PARALLEL
4958       && GET_CODE (XVECEXP (x, 0, 0)) == SET
4959       && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
4960     {
4961       new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
4962 
4963       /* If this substitution failed, this whole thing fails.  */
4964       if (GET_CODE (new_rtx) == CLOBBER
4965 	  && XEXP (new_rtx, 0) == const0_rtx)
4966 	return new_rtx;
4967 
4968       SUBST (XVECEXP (x, 0, 0), new_rtx);
4969 
4970       for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
4971 	{
4972 	  rtx dest = SET_DEST (XVECEXP (x, 0, i));
4973 
4974 	  if (!REG_P (dest)
4975 	      && GET_CODE (dest) != CC0
4976 	      && GET_CODE (dest) != PC)
4977 	    {
4978 	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);
4979 
4980 	      /* If this substitution failed, this whole thing fails.  */
4981 	      if (GET_CODE (new_rtx) == CLOBBER
4982 		  && XEXP (new_rtx, 0) == const0_rtx)
4983 		return new_rtx;
4984 
4985 	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
4986 	    }
4987 	}
4988     }
4989   else
4990     {
4991       len = GET_RTX_LENGTH (code);
4992       fmt = GET_RTX_FORMAT (code);
4993 
4994       /* We don't need to process a SET_DEST that is a register, CC0,
4995 	 or PC, so set up to skip this common case.  All other cases
4996 	 where we want to suppress replacing something inside a
4997 	 SET_SRC are handled via the IN_DEST operand.  */
4998       if (code == SET
4999 	  && (REG_P (SET_DEST (x))
5000 	      || GET_CODE (SET_DEST (x)) == CC0
5001 	      || GET_CODE (SET_DEST (x)) == PC))
5002 	fmt = "ie";
5003 
5004       /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5005 	 constant.  */
5006       if (fmt[0] == 'e')
5007 	op0_mode = GET_MODE (XEXP (x, 0));
5008 
5009       for (i = 0; i < len; i++)
5010 	{
5011 	  if (fmt[i] == 'E')
5012 	    {
5013 	      int j;
5014 	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5015 		{
5016 		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5017 		    {
5018 		      new_rtx = (unique_copy && n_occurrences
5019 			     ? copy_rtx (to) : to);
5020 		      n_occurrences++;
5021 		    }
5022 		  else
5023 		    {
5024 		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5025 				       unique_copy);
5026 
5027 		      /* If this substitution failed, this whole thing
5028 			 fails.  */
5029 		      if (GET_CODE (new_rtx) == CLOBBER
5030 			  && XEXP (new_rtx, 0) == const0_rtx)
5031 			return new_rtx;
5032 		    }
5033 
5034 		  SUBST (XVECEXP (x, i, j), new_rtx);
5035 		}
5036 	    }
5037 	  else if (fmt[i] == 'e')
5038 	    {
5039 	      /* If this is a register being set, ignore it.  */
5040 	      new_rtx = XEXP (x, i);
5041 	      if (in_dest
5042 		  && i == 0
5043 		  && (((code == SUBREG || code == ZERO_EXTRACT)
5044 		       && REG_P (new_rtx))
5045 		      || code == STRICT_LOW_PART))
5046 		;
5047 
5048 	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5049 		{
5050 		  /* In general, don't install a subreg involving two
5051 		     modes not tieable.  It can worsen register
5052 		     allocation, and can even make invalid reload
5053 		     insns, since the reg inside may need to be copied
5054 		     from in the outside mode, and that may be invalid
5055 		     if it is an fp reg copied in integer mode.
5056 
5057 		     We allow two exceptions to this: it is valid if
5058 		     it is inside another SUBREG and the mode of that
5059 		     SUBREG and the mode of the inside of TO are
5060 		     tieable; and it is valid if X is a SET that copies
5061 		     FROM to CC0.  */
5062 
5063 		  if (GET_CODE (to) == SUBREG
5064 		      && ! MODES_TIEABLE_P (GET_MODE (to),
5065 					    GET_MODE (SUBREG_REG (to)))
5066 		      && ! (code == SUBREG
5067 			    && MODES_TIEABLE_P (GET_MODE (x),
5068 						GET_MODE (SUBREG_REG (to))))
5069 #ifdef HAVE_cc0
5070 		      && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
5071 #endif
5072 		      )
5073 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5074 
5075 #ifdef CANNOT_CHANGE_MODE_CLASS
5076 		  if (code == SUBREG
5077 		      && REG_P (to)
5078 		      && REGNO (to) < FIRST_PSEUDO_REGISTER
5079 		      && REG_CANNOT_CHANGE_MODE_P (REGNO (to),
5080 						   GET_MODE (to),
5081 						   GET_MODE (x)))
5082 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5083 #endif
5084 
5085 		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5086 		  n_occurrences++;
5087 		}
5088 	      else
5089 		/* If we are in a SET_DEST, suppress most cases unless we
5090 		   have gone inside a MEM, in which case we want to
5091 		   simplify the address.  We assume here that things that
5092 		   are actually part of the destination have their inner
5093 		   parts in the first expression.  This is true for SUBREG,
5094 		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5095 		   things aside from REG and MEM that should appear in a
5096 		   SET_DEST.  */
5097 		new_rtx = subst (XEXP (x, i), from, to,
5098 			     (((in_dest
5099 				&& (code == SUBREG || code == STRICT_LOW_PART
5100 				    || code == ZERO_EXTRACT))
5101 			       || code == SET)
5102 			      && i == 0),
5103 				 code == IF_THEN_ELSE && i == 0,
5104 				 unique_copy);
5105 
5106 	      /* If we found that we will have to reject this combination,
5107 		 indicate that by returning the CLOBBER ourselves, rather than
5108 		 an expression containing it.  This will speed things up as
5109 		 well as prevent accidents where two CLOBBERs are considered
5110 		 to be equal, thus producing an incorrect simplification.  */
5111 
5112 	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5113 		return new_rtx;
5114 
5115 	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5116 		{
5117 		  enum machine_mode mode = GET_MODE (x);
5118 
5119 		  x = simplify_subreg (GET_MODE (x), new_rtx,
5120 				       GET_MODE (SUBREG_REG (x)),
5121 				       SUBREG_BYTE (x));
5122 		  if (! x)
5123 		    x = gen_rtx_CLOBBER (mode, const0_rtx);
5124 		}
5125 	      else if (CONST_INT_P (new_rtx)
5126 		       && GET_CODE (x) == ZERO_EXTEND)
5127 		{
5128 		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5129 						new_rtx, GET_MODE (XEXP (x, 0)));
5130 		  gcc_assert (x);
5131 		}
5132 	      else
5133 		SUBST (XEXP (x, i), new_rtx);
5134 	    }
5135 	}
5136     }
5137 
5138   /* Check if we are loading something from the constant pool via float
5139      extension; in this case we would undo the compress_float_constant
5140      optimization and degenerate the constant load to an immediate value.  */
5141   if (GET_CODE (x) == FLOAT_EXTEND
5142       && MEM_P (XEXP (x, 0))
5143       && MEM_READONLY_P (XEXP (x, 0)))
5144     {
5145       rtx tmp = avoid_constant_pool_reference (x);
5146       if (x != tmp)
5147         return x;
5148     }
5149 
5150   /* Try to simplify X.  If the simplification changed the code, it is likely
5151      that further simplification will help, so loop, but limit the number
5152      of repetitions that will be performed.  */
5153 
5154   for (i = 0; i < 4; i++)
5155     {
5156       /* If X is sufficiently simple, don't bother trying to do anything
5157 	 with it.  */
5158       if (code != CONST_INT && code != REG && code != CLOBBER)
5159 	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5160 
5161       if (GET_CODE (x) == code)
5162 	break;
5163 
5164       code = GET_CODE (x);
5165 
5166       /* We no longer know the original mode of operand 0 since we
5167 	 have changed the form of X.  */
5168       op0_mode = VOIDmode;
5169     }
5170 
5171   return x;
5172 }
5173 
5174 /* Simplify X, a piece of RTL.  We just operate on the expression at the
5175    outer level; call `subst' to simplify recursively.  Return the new
5176    expression.
5177 
5178    OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
5179    if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
5180    of a condition.  */
5181 
5182 static rtx
5183 combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest,
5184 		      int in_cond)
5185 {
5186   enum rtx_code code = GET_CODE (x);
5187   enum machine_mode mode = GET_MODE (x);
5188   rtx temp;
5189   int i;
5190 
5191   /* If this is a commutative operation, put a constant last and a complex
5192      expression first.  We don't need to do this for comparisons here.  */
5193   if (COMMUTATIVE_ARITH_P (x)
5194       && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5195     {
5196       temp = XEXP (x, 0);
5197       SUBST (XEXP (x, 0), XEXP (x, 1));
5198       SUBST (XEXP (x, 1), temp);
5199     }
5200 
5201   /* If this is a simple operation applied to an IF_THEN_ELSE, try
5202      applying it to the arms of the IF_THEN_ELSE.  This often simplifies
5203      things.  Check for cases where both arms are testing the same
5204      condition.
5205 
5206      Don't do anything if all operands are very simple.  */
5207 
5208   if ((BINARY_P (x)
5209        && ((!OBJECT_P (XEXP (x, 0))
5210 	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5211 		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5212 	   || (!OBJECT_P (XEXP (x, 1))
5213 	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5214 		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5215       || (UNARY_P (x)
5216 	  && (!OBJECT_P (XEXP (x, 0))
5217 	       && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5218 		     && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5219     {
5220       rtx cond, true_rtx, false_rtx;
5221 
5222       cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5223       if (cond != 0
5224 	  /* If everything is a comparison, what we have is highly unlikely
5225 	     to be simpler, so don't use it.  */
5226 	  && ! (COMPARISON_P (x)
5227 		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5228 	{
5229 	  rtx cop1 = const0_rtx;
5230 	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5231 
5232 	  if (cond_code == NE && COMPARISON_P (cond))
5233 	    return x;
5234 
5235 	  /* Simplify the alternative arms; this may collapse the true and
5236 	     false arms to store-flag values.  Be careful to use copy_rtx
5237 	     here since true_rtx or false_rtx might share RTL with x as a
5238 	     result of the if_then_else_cond call above.  */
5239 	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5240 	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5241 
5242 	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
5243 	     is unlikely to be simpler.  */
5244 	  if (general_operand (true_rtx, VOIDmode)
5245 	      && general_operand (false_rtx, VOIDmode))
5246 	    {
5247 	      enum rtx_code reversed;
5248 
5249 	      /* Restarting if we generate a store-flag expression will cause
5250 		 us to loop.  Just drop through in this case.  */
5251 
5252 	      /* If the result values are STORE_FLAG_VALUE and zero, we can
5253 		 just make the comparison operation.  */
5254 	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5255 		x = simplify_gen_relational (cond_code, mode, VOIDmode,
5256 					     cond, cop1);
5257 	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5258 		       && ((reversed = reversed_comparison_code_parts
5259 					(cond_code, cond, cop1, NULL))
5260 			   != UNKNOWN))
5261 		x = simplify_gen_relational (reversed, mode, VOIDmode,
5262 					     cond, cop1);
5263 
5264 	      /* Likewise, we can make the negate of a comparison operation
5265 		 if the result values are - STORE_FLAG_VALUE and zero.  */
5266 	      else if (CONST_INT_P (true_rtx)
5267 		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5268 		       && false_rtx == const0_rtx)
5269 		x = simplify_gen_unary (NEG, mode,
5270 					simplify_gen_relational (cond_code,
5271 								 mode, VOIDmode,
5272 								 cond, cop1),
5273 					mode);
5274 	      else if (CONST_INT_P (false_rtx)
5275 		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5276 		       && true_rtx == const0_rtx
5277 		       && ((reversed = reversed_comparison_code_parts
5278 					(cond_code, cond, cop1, NULL))
5279 			   != UNKNOWN))
5280 		x = simplify_gen_unary (NEG, mode,
5281 					simplify_gen_relational (reversed,
5282 								 mode, VOIDmode,
5283 								 cond, cop1),
5284 					mode);
5285 	      else
5286 		return gen_rtx_IF_THEN_ELSE (mode,
5287 					     simplify_gen_relational (cond_code,
5288 								      mode,
5289 								      VOIDmode,
5290 								      cond,
5291 								      cop1),
5292 					     true_rtx, false_rtx);
5293 
5294 	      code = GET_CODE (x);
5295 	      op0_mode = VOIDmode;
5296 	    }
5297 	}
5298     }
5299 
5300   /* Try to fold this expression in case we have constants that weren't
5301      present before.  */
5302   temp = 0;
5303   switch (GET_RTX_CLASS (code))
5304     {
5305     case RTX_UNARY:
5306       if (op0_mode == VOIDmode)
5307 	op0_mode = GET_MODE (XEXP (x, 0));
5308       temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5309       break;
5310     case RTX_COMPARE:
5311     case RTX_COMM_COMPARE:
5312       {
5313 	enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5314 	if (cmp_mode == VOIDmode)
5315 	  {
5316 	    cmp_mode = GET_MODE (XEXP (x, 1));
5317 	    if (cmp_mode == VOIDmode)
5318 	      cmp_mode = op0_mode;
5319 	  }
5320 	temp = simplify_relational_operation (code, mode, cmp_mode,
5321 					      XEXP (x, 0), XEXP (x, 1));
5322       }
5323       break;
5324     case RTX_COMM_ARITH:
5325     case RTX_BIN_ARITH:
5326       temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5327       break;
5328     case RTX_BITFIELD_OPS:
5329     case RTX_TERNARY:
5330       temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5331 					 XEXP (x, 1), XEXP (x, 2));
5332       break;
5333     default:
5334       break;
5335     }
5336 
5337   if (temp)
5338     {
5339       x = temp;
5340       code = GET_CODE (temp);
5341       op0_mode = VOIDmode;
5342       mode = GET_MODE (temp);
5343     }
5344 
5345   /* First see if we can apply the inverse distributive law.  */
5346   if (code == PLUS || code == MINUS
5347       || code == AND || code == IOR || code == XOR)
5348     {
5349       x = apply_distributive_law (x);
5350       code = GET_CODE (x);
5351       op0_mode = VOIDmode;
5352     }
5353 
5354   /* If CODE is an associative operation not otherwise handled, see if we
5355      can associate some operands.  This can win if they are constants or
5356      if they are logically related (i.e. (a & b) & a).  */
5357   if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5358        || code == AND || code == IOR || code == XOR
5359        || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5360       && ((INTEGRAL_MODE_P (mode) && code != DIV)
5361 	  || (flag_associative_math && FLOAT_MODE_P (mode))))
5362     {
5363       if (GET_CODE (XEXP (x, 0)) == code)
5364 	{
5365 	  rtx other = XEXP (XEXP (x, 0), 0);
5366 	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5367 	  rtx inner_op1 = XEXP (x, 1);
5368 	  rtx inner;
5369 
5370 	  /* Make sure we pass the constant operand if any as the second
5371 	     one if this is a commutative operation.  */
5372 	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5373 	    {
5374 	      rtx tem = inner_op0;
5375 	      inner_op0 = inner_op1;
5376 	      inner_op1 = tem;
5377 	    }
5378 	  inner = simplify_binary_operation (code == MINUS ? PLUS
5379 					     : code == DIV ? MULT
5380 					     : code,
5381 					     mode, inner_op0, inner_op1);
5382 
5383 	  /* For commutative operations, try the other pair if that one
5384 	     didn't simplify.  */
5385 	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5386 	    {
5387 	      other = XEXP (XEXP (x, 0), 1);
5388 	      inner = simplify_binary_operation (code, mode,
5389 						 XEXP (XEXP (x, 0), 0),
5390 						 XEXP (x, 1));
5391 	    }
5392 
5393 	  if (inner)
5394 	    return simplify_gen_binary (code, mode, other, inner);
5395 	}
5396     }
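
  /* E.g. (and (and (reg A) (const_int 255)) (const_int 15)) associates
     the two constants, which simplify to (const_int 15), giving
     (and (reg A) (const_int 15)).  */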
5397 
5398   /* A little bit of algebraic simplification here.  */
5399   switch (code)
5400     {
5401     case MEM:
5402       /* Ensure that our address has any ASHIFTs converted to MULT in case
5403 	 address-recognizing predicates are called later.  */
5404       temp = make_compound_operation (XEXP (x, 0), MEM);
5405       SUBST (XEXP (x, 0), temp);
5406       break;
5407 
5408     case SUBREG:
5409       if (op0_mode == VOIDmode)
5410 	op0_mode = GET_MODE (SUBREG_REG (x));
5411 
5412       /* See if this can be moved to simplify_subreg.  */
5413       if (CONSTANT_P (SUBREG_REG (x))
5414 	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5415 	     /* Don't call gen_lowpart if the inner mode
5416 		is VOIDmode and we cannot simplify it, as SUBREG without
5417 		inner mode is invalid.  */
5418 	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5419 	      || gen_lowpart_common (mode, SUBREG_REG (x))))
5420 	return gen_lowpart (mode, SUBREG_REG (x));
5421 
5422       if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5423 	break;
5424       {
5425 	rtx temp;
5426 	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5427 				SUBREG_BYTE (x));
5428 	if (temp)
5429 	  return temp;
5430       }
5431 
5432       /* Don't change the mode of the MEM if that would change the meaning
5433 	 of the address.  */
5434       if (MEM_P (SUBREG_REG (x))
5435 	  && (MEM_VOLATILE_P (SUBREG_REG (x))
5436 	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5437 					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
5438 	return gen_rtx_CLOBBER (mode, const0_rtx);
5439 
5440       /* Note that we cannot do any narrowing for non-constants since
5441 	 we might have been counting on using the fact that some bits were
5442 	 zero.  We now do this in the SET.  */
5443 
5444       break;
5445 
5446     case NEG:
5447       temp = expand_compound_operation (XEXP (x, 0));
5448 
5449       /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5450 	 replaced by (lshiftrt X C).  This will convert
5451 	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
5452 
5453       if (GET_CODE (temp) == ASHIFTRT
5454 	  && CONST_INT_P (XEXP (temp, 1))
5455 	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5456 	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5457 				     INTVAL (XEXP (temp, 1)));
5458 
5459       /* If X has only a single bit that might be nonzero, say, bit I, convert
5460 	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5461 	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
5462 	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
5463 	 or a SUBREG of one since we'd be making the expression more
5464 	 complex if it was just a register.  */
5465 
5466       if (!REG_P (temp)
5467 	  && ! (GET_CODE (temp) == SUBREG
5468 		&& REG_P (SUBREG_REG (temp)))
5469 	  && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5470 	{
5471 	  rtx temp1 = simplify_shift_const
5472 	    (NULL_RTX, ASHIFTRT, mode,
5473 	     simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5474 				   GET_MODE_PRECISION (mode) - 1 - i),
5475 	     GET_MODE_PRECISION (mode) - 1 - i);
5476 
5477 	  /* If all we did was surround TEMP with the two shifts, we
5478 	     haven't improved anything, so don't use it.  Otherwise,
5479 	     we are better off with TEMP1.  */
5480 	  if (GET_CODE (temp1) != ASHIFTRT
5481 	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5482 	      || XEXP (XEXP (temp1, 0), 0) != temp)
5483 	    return temp1;
5484 	}
5485       break;
5486 
5487     case TRUNCATE:
5488       /* We can't handle truncation to a partial integer mode here
5489 	 because we don't know the real bitsize of the partial
5490 	 integer mode.  */
5491       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5492 	break;
5493 
5494       if (HWI_COMPUTABLE_MODE_P (mode))
5495 	SUBST (XEXP (x, 0),
5496 	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5497 			      GET_MODE_MASK (mode), 0));
5498 
5499       /* We can truncate a constant value and return it.  */
5500       if (CONST_INT_P (XEXP (x, 0)))
5501 	return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5502 
5503       /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5504 	 whose value is a comparison can be replaced with a subreg if
5505 	 STORE_FLAG_VALUE permits.  */
5506       if (HWI_COMPUTABLE_MODE_P (mode)
5507 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5508 	  && (temp = get_last_value (XEXP (x, 0)))
5509 	  && COMPARISON_P (temp))
5510 	return gen_lowpart (mode, XEXP (x, 0));
5511       break;
5512 
5513     case CONST:
5514       /* (const (const X)) can become (const X).  Do it this way rather than
5515 	 returning the inner CONST since CONST can be shared with a
5516 	 REG_EQUAL note.  */
5517       if (GET_CODE (XEXP (x, 0)) == CONST)
5518 	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5519       break;
5520 
5521 #ifdef HAVE_lo_sum
5522     case LO_SUM:
5523       /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
5524 	 can add in an offset.  find_split_point will split this address up
5525 	 again if it doesn't match.  */
5526       if (GET_CODE (XEXP (x, 0)) == HIGH
5527 	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5528 	return XEXP (x, 1);
5529       break;
5530 #endif
5531 
5532     case PLUS:
5533       /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5534 	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5535 	 bit-field and can be replaced by either a sign_extend or a
5536 	 sign_extract.  The `and' may be a zero_extend and the two
5537 	 <c>, -<c> constants may be reversed.  */
5538       if (GET_CODE (XEXP (x, 0)) == XOR
5539 	  && CONST_INT_P (XEXP (x, 1))
5540 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5541 	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5542 	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5543 	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5544 	  && HWI_COMPUTABLE_MODE_P (mode)
5545 	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5546 	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5547 	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5548 		   == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5549 	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5550 		  && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5551 		      == (unsigned int) i + 1))))
5552 	return simplify_shift_const
5553 	  (NULL_RTX, ASHIFTRT, mode,
5554 	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
5555 				 XEXP (XEXP (XEXP (x, 0), 0), 0),
5556 				 GET_MODE_PRECISION (mode) - (i + 1)),
5557 	   GET_MODE_PRECISION (mode) - (i + 1));
5558 
5559       /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5560 	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5561 	 the bitsize of the mode - 1.  This allows simplification of
5562 	 "a = (b & 8) == 0;"  */
5563       if (XEXP (x, 1) == constm1_rtx
5564 	  && !REG_P (XEXP (x, 0))
5565 	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5566 		&& REG_P (SUBREG_REG (XEXP (x, 0))))
5567 	  && nonzero_bits (XEXP (x, 0), mode) == 1)
5568 	return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5569 	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
5570 				 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5571 				 GET_MODE_PRECISION (mode) - 1),
5572 	   GET_MODE_PRECISION (mode) - 1);
5573 
5574       /* If we are adding two things that have no bits in common, convert
5575 	 the addition into an IOR.  This will often be further simplified,
5576 	 for example in cases like ((a & 1) + (a & 2)), which can
5577 	 become a & 3.  */
5578 
5579       if (HWI_COMPUTABLE_MODE_P (mode)
5580 	  && (nonzero_bits (XEXP (x, 0), mode)
5581 	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
5582 	{
5583 	  /* Try to simplify the expression further.  */
5584 	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5585 	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5586 
5587 	  /* If we could, great.  If not, do not go ahead with the IOR
5588 	     replacement, since PLUS appears in many special purpose
5589 	     address arithmetic instructions.  */
5590 	  if (GET_CODE (temp) != CLOBBER
5591 	      && (GET_CODE (temp) != IOR
5592 		  || ((XEXP (temp, 0) != XEXP (x, 0)
5593 		       || XEXP (temp, 1) != XEXP (x, 1))
5594 		      && (XEXP (temp, 0) != XEXP (x, 1)
5595 			  || XEXP (temp, 1) != XEXP (x, 0)))))
5596 	    return temp;
5597 	}
5598       break;
5599 
5600     case MINUS:
5601       /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5602 	 (and <foo> (const_int pow2-1))  */
5603       if (GET_CODE (XEXP (x, 1)) == AND
5604 	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5605 	  && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5606 	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5607 	return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5608 				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
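      /* E.g. (minus (reg A) (and (reg A) (const_int -8))) becomes
	 (and (reg A) (const_int 7)): subtracting the high bits is the
	 same as keeping only the low three.  */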
5609       break;
5610 
5611     case MULT:
5612       /* If we have (mult (plus A B) C), apply the distributive law and then
5613 	 the inverse distributive law to see if things simplify.  This
5614 	 occurs mostly in addresses, often when unrolling loops.  */
5615 
5616       if (GET_CODE (XEXP (x, 0)) == PLUS)
5617 	{
5618 	  rtx result = distribute_and_simplify_rtx (x, 0);
5619 	  if (result)
5620 	    return result;
5621 	}
5622 
5623       /* Try to simplify a*(b/c) as (a*b)/c.  */
5624       if (FLOAT_MODE_P (mode) && flag_associative_math
5625 	  && GET_CODE (XEXP (x, 0)) == DIV)
5626 	{
5627 	  rtx tem = simplify_binary_operation (MULT, mode,
5628 					       XEXP (XEXP (x, 0), 0),
5629 					       XEXP (x, 1));
5630 	  if (tem)
5631 	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5632 	}
5633       break;
5634 
5635     case UDIV:
5636       /* If this is a divide by a power of two, treat it as a shift if
5637 	 its first operand is a shift.  */
5638       if (CONST_INT_P (XEXP (x, 1))
5639 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5640 	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
5641 	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5642 	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5643 	      || GET_CODE (XEXP (x, 0)) == ROTATE
5644 	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
5645 	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
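      /* E.g. (udiv (ashift (reg X) (const_int 3)) (const_int 8)) is
	 treated as (lshiftrt (ashift (reg X) (const_int 3))
	 (const_int 3)), which simplify_shift_const can reduce further
	 (in SImode, to an AND clearing the three high bits).  */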
5646       break;
5647 
5648     case EQ:  case NE:
5649     case GT:  case GTU:  case GE:  case GEU:
5650     case LT:  case LTU:  case LE:  case LEU:
5651     case UNEQ:  case LTGT:
5652     case UNGT:  case UNGE:
5653     case UNLT:  case UNLE:
5654     case UNORDERED: case ORDERED:
5655       /* If the first operand is a condition code, we can't do anything
5656 	 with it.  */
5657       if (GET_CODE (XEXP (x, 0)) == COMPARE
5658 	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5659 	      && ! CC0_P (XEXP (x, 0))))
5660 	{
5661 	  rtx op0 = XEXP (x, 0);
5662 	  rtx op1 = XEXP (x, 1);
5663 	  enum rtx_code new_code;
5664 
5665 	  if (GET_CODE (op0) == COMPARE)
5666 	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5667 
5668 	  /* Simplify our comparison, if possible.  */
5669 	  new_code = simplify_comparison (code, &op0, &op1);
5670 
5671 	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5672 	     if only the low-order bit is possibly nonzero in X (such as when
5673 	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
5674 	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
5675 	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
5676 	     (plus X 1).
5677 
5678 	     Remove any ZERO_EXTRACT we made when thinking this was a
5679 	     comparison.  It may now be simpler to use, e.g., an AND.  If a
5680 	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
5681 	     the call to make_compound_operation in the SET case.
5682 
5683 	     Don't apply these optimizations if the caller would
5684 	     prefer a comparison rather than a value.
5685 	     E.g., for the condition in an IF_THEN_ELSE most targets need
5686 	     an explicit comparison.  */
5687 
5688 	  if (in_cond)
5689 	    ;
5690 
5691 	  else if (STORE_FLAG_VALUE == 1
5692 	      && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5693 	      && op1 == const0_rtx
5694 	      && mode == GET_MODE (op0)
5695 	      && nonzero_bits (op0, mode) == 1)
5696 	    return gen_lowpart (mode,
5697 				expand_compound_operation (op0));
5698 
5699 	  else if (STORE_FLAG_VALUE == 1
5700 		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5701 		   && op1 == const0_rtx
5702 		   && mode == GET_MODE (op0)
5703 		   && (num_sign_bit_copies (op0, mode)
5704 		       == GET_MODE_PRECISION (mode)))
5705 	    {
5706 	      op0 = expand_compound_operation (op0);
5707 	      return simplify_gen_unary (NEG, mode,
5708 					 gen_lowpart (mode, op0),
5709 					 mode);
5710 	    }
5711 
5712 	  else if (STORE_FLAG_VALUE == 1
5713 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5714 		   && op1 == const0_rtx
5715 		   && mode == GET_MODE (op0)
5716 		   && nonzero_bits (op0, mode) == 1)
5717 	    {
5718 	      op0 = expand_compound_operation (op0);
5719 	      return simplify_gen_binary (XOR, mode,
5720 					  gen_lowpart (mode, op0),
5721 					  const1_rtx);
5722 	    }
5723 
5724 	  else if (STORE_FLAG_VALUE == 1
5725 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5726 		   && op1 == const0_rtx
5727 		   && mode == GET_MODE (op0)
5728 		   && (num_sign_bit_copies (op0, mode)
5729 		       == GET_MODE_PRECISION (mode)))
5730 	    {
5731 	      op0 = expand_compound_operation (op0);
5732 	      return plus_constant (mode, gen_lowpart (mode, op0), 1);
5733 	    }
5734 
5735 	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
5736 	     those above.  */
5737 	  if (in_cond)
5738 	    ;
5739 
5740 	  else if (STORE_FLAG_VALUE == -1
5741 	      && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5742 	      && op1 == const0_rtx
5743 	      && (num_sign_bit_copies (op0, mode)
5744 		  == GET_MODE_PRECISION (mode)))
5745 	    return gen_lowpart (mode,
5746 				expand_compound_operation (op0));
5747 
5748 	  else if (STORE_FLAG_VALUE == -1
5749 		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5750 		   && op1 == const0_rtx
5751 		   && mode == GET_MODE (op0)
5752 		   && nonzero_bits (op0, mode) == 1)
5753 	    {
5754 	      op0 = expand_compound_operation (op0);
5755 	      return simplify_gen_unary (NEG, mode,
5756 					 gen_lowpart (mode, op0),
5757 					 mode);
5758 	    }
5759 
5760 	  else if (STORE_FLAG_VALUE == -1
5761 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5762 		   && op1 == const0_rtx
5763 		   && mode == GET_MODE (op0)
5764 		   && (num_sign_bit_copies (op0, mode)
5765 		       == GET_MODE_PRECISION (mode)))
5766 	    {
5767 	      op0 = expand_compound_operation (op0);
5768 	      return simplify_gen_unary (NOT, mode,
5769 					 gen_lowpart (mode, op0),
5770 					 mode);
5771 	    }
5772 
5773 	  /* If X is 0/1, (eq X 0) is X-1.  */
5774 	  else if (STORE_FLAG_VALUE == -1
5775 		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
5776 		   && op1 == const0_rtx
5777 		   && mode == GET_MODE (op0)
5778 		   && nonzero_bits (op0, mode) == 1)
5779 	    {
5780 	      op0 = expand_compound_operation (op0);
5781 	      return plus_constant (mode, gen_lowpart (mode, op0), -1);
5782 	    }
5783 
5784 	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
5785 	     one bit that might be nonzero, we can convert (ne x 0) to
5786 	     (ashift x c) where C puts the bit in the sign bit.  Remove any
5787 	     AND with STORE_FLAG_VALUE when we are done, since we are only
5788 	     going to test the sign bit.  */
5789 	  if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5790 	      && HWI_COMPUTABLE_MODE_P (mode)
5791 	      && val_signbit_p (mode, STORE_FLAG_VALUE)
5792 	      && op1 == const0_rtx
5793 	      && mode == GET_MODE (op0)
5794 	      && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
5795 	    {
5796 	      x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
5797 					expand_compound_operation (op0),
5798 					GET_MODE_PRECISION (mode) - 1 - i);
5799 	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
5800 		return XEXP (x, 0);
5801 	      else
5802 		return x;
5803 	    }
5804 
5805 	  /* If the code changed, return a whole new comparison.
5806 	     We also need to avoid using SUBST in cases where
5807 	     simplify_comparison has widened a comparison with a CONST_INT,
5808 	     since in that case the wider CONST_INT may fail the sanity
5809 	     checks in do_SUBST.  */
5810 	  if (new_code != code
5811 	      || (CONST_INT_P (op1)
5812 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
5813 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
5814 	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);
5815 
5816 	  /* Otherwise, keep this operation, but maybe change its operands.
5817 	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
5818 	  SUBST (XEXP (x, 0), op0);
5819 	  SUBST (XEXP (x, 1), op1);
5820 	}
5821       break;
5822 
5823     case IF_THEN_ELSE:
5824       return simplify_if_then_else (x);
5825 
5826     case ZERO_EXTRACT:
5827     case SIGN_EXTRACT:
5828     case ZERO_EXTEND:
5829     case SIGN_EXTEND:
5830       /* If we are processing SET_DEST, we are done.  */
5831       if (in_dest)
5832 	return x;
5833 
5834       return expand_compound_operation (x);
5835 
5836     case SET:
5837       return simplify_set (x);
5838 
5839     case AND:
5840     case IOR:
5841       return simplify_logical (x);
5842 
5843     case ASHIFT:
5844     case LSHIFTRT:
5845     case ASHIFTRT:
5846     case ROTATE:
5847     case ROTATERT:
5848       /* If this is a shift by a constant amount, simplify it.  */
5849       if (CONST_INT_P (XEXP (x, 1)))
5850 	return simplify_shift_const (x, code, mode, XEXP (x, 0),
5851 				     INTVAL (XEXP (x, 1)));
5852 
5853       else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
5854 	SUBST (XEXP (x, 1),
5855 	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
5856 			      ((unsigned HOST_WIDE_INT) 1
5857 			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
5858 			      - 1,
5859 			      0));
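      /* E.g. on a SHIFT_COUNT_TRUNCATED target, an SImode shift count
	 only matters modulo 32, so a count of (and (reg C)
	 (const_int 63)) is narrowed by force_to_mode to
	 (and (reg C) (const_int 31)).  */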
5860       break;
5861 
5862     default:
5863       break;
5864     }
5865 
5866   return x;
5867 }
5868 
5869 /* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */
5870 
5871 static rtx
5872 simplify_if_then_else (rtx x)
5873 {
5874   enum machine_mode mode = GET_MODE (x);
5875   rtx cond = XEXP (x, 0);
5876   rtx true_rtx = XEXP (x, 1);
5877   rtx false_rtx = XEXP (x, 2);
5878   enum rtx_code true_code = GET_CODE (cond);
5879   int comparison_p = COMPARISON_P (cond);
5880   rtx temp;
5881   int i;
5882   enum rtx_code false_code;
5883   rtx reversed;
5884 
5885   /* Simplify storing of the truth value.  */
5886   if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
5887     return simplify_gen_relational (true_code, mode, VOIDmode,
5888 				    XEXP (cond, 0), XEXP (cond, 1));
5889 
5890   /* Also when the truth value has to be reversed.  */
5891   if (comparison_p
5892       && true_rtx == const0_rtx && false_rtx == const_true_rtx
5893       && (reversed = reversed_comparison (cond, mode)))
5894     return reversed;
5895 
5896   /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
5897      in it is being compared against certain values.  Get the true and false
5898      comparisons and see if that says anything about the value of each arm.  */
5899 
5900   if (comparison_p
5901       && ((false_code = reversed_comparison_code (cond, NULL))
5902 	  != UNKNOWN)
5903       && REG_P (XEXP (cond, 0)))
5904     {
5905       HOST_WIDE_INT nzb;
5906       rtx from = XEXP (cond, 0);
5907       rtx true_val = XEXP (cond, 1);
5908       rtx false_val = true_val;
5909       int swapped = 0;
5910 
5911       /* If FALSE_CODE is EQ, swap the codes and arms.  */
5912 
5913       if (false_code == EQ)
5914 	{
5915 	  swapped = 1, true_code = EQ, false_code = NE;
5916 	  temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5917 	}
5918 
5919       /* If we are comparing against zero and the expression being tested has
5920 	 only a single bit that might be nonzero, that is its value when it is
5921 	 not equal to zero.  Similarly if it is known to be -1 or 0.  */
5922 
5923       if (true_code == EQ && true_val == const0_rtx
5924 	  && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
5925 	{
5926 	  false_code = EQ;
5927 	  false_val = gen_int_mode (nzb, GET_MODE (from));
5928 	}
5929       else if (true_code == EQ && true_val == const0_rtx
5930 	       && (num_sign_bit_copies (from, GET_MODE (from))
5931 		   == GET_MODE_PRECISION (GET_MODE (from))))
5932 	{
5933 	  false_code = EQ;
5934 	  false_val = constm1_rtx;
5935 	}
5936 
5937       /* Now simplify an arm if we know the value of the register in the
5938 	 branch and it is used in the arm.  Be careful due to the potential
5939 	 of locally-shared RTL.  */
5940 
5941       if (reg_mentioned_p (from, true_rtx))
5942 	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
5943 				      from, true_val),
5944 			  pc_rtx, pc_rtx, 0, 0, 0);
5945       if (reg_mentioned_p (from, false_rtx))
5946 	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
5947 				   from, false_val),
5948 			   pc_rtx, pc_rtx, 0, 0, 0);
5949 
5950       SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
5951       SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
5952 
5953       true_rtx = XEXP (x, 1);
5954       false_rtx = XEXP (x, 2);
5955       true_code = GET_CODE (cond);
5956     }
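
  /* E.g. for (if_then_else (eq (reg A) (const_int 0))
		(plus (reg A) (const_int 1))
		(reg B))
     the true arm is rewritten with (reg A) known to be zero and
     collapses to (const_int 1); the false arm could be narrowed the
     same way if nonzero_bits or the sign-bit-copy count pinned down
     the value of (reg A) when the condition fails.  */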
5957 
5958   /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
5959      reversed, do so to avoid needing two sets of patterns for
5960      subtract-and-branch insns.  Similarly if we have a constant in the true
5961      arm, the false arm is the same as the first operand of the comparison, or
5962      the false arm is more complicated than the true arm.  */
5963 
5964   if (comparison_p
5965       && reversed_comparison_code (cond, NULL) != UNKNOWN
5966       && (true_rtx == pc_rtx
5967 	  || (CONSTANT_P (true_rtx)
5968 	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
5969 	  || true_rtx == const0_rtx
5970 	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
5971 	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
5972 	      && !OBJECT_P (false_rtx))
5973 	  || reg_mentioned_p (true_rtx, false_rtx)
5974 	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
5975     {
5976       true_code = reversed_comparison_code (cond, NULL);
5977       SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
5978       SUBST (XEXP (x, 1), false_rtx);
5979       SUBST (XEXP (x, 2), true_rtx);
5980 
5981       temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
5982       cond = XEXP (x, 0);
5983 
5984       /* It is possible that the conditional has been simplified out.  */
5985       true_code = GET_CODE (cond);
5986       comparison_p = COMPARISON_P (cond);
5987     }
5988 
5989   /* If the two arms are identical, we don't need the comparison.  */
5990 
5991   if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
5992     return true_rtx;
5993 
5994   /* Convert a == b ? b : a to "a".  */
5995   if (true_code == EQ && ! side_effects_p (cond)
5996       && !HONOR_NANS (mode)
5997       && rtx_equal_p (XEXP (cond, 0), false_rtx)
5998       && rtx_equal_p (XEXP (cond, 1), true_rtx))
5999     return false_rtx;
6000   else if (true_code == NE && ! side_effects_p (cond)
6001 	   && !HONOR_NANS (mode)
6002 	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
6003 	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
6004     return true_rtx;
6005 
6006   /* Look for cases where we have (abs x) or (neg (abs X)).  */
6007 
6008   if (GET_MODE_CLASS (mode) == MODE_INT
6009       && comparison_p
6010       && XEXP (cond, 1) == const0_rtx
6011       && GET_CODE (false_rtx) == NEG
6012       && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6013       && rtx_equal_p (true_rtx, XEXP (cond, 0))
6014       && ! side_effects_p (true_rtx))
6015     switch (true_code)
6016       {
6017       case GT:
6018       case GE:
6019 	return simplify_gen_unary (ABS, mode, true_rtx, mode);
6020       case LT:
6021       case LE:
6022 	return
6023 	  simplify_gen_unary (NEG, mode,
6024 			      simplify_gen_unary (ABS, mode, true_rtx, mode),
6025 			      mode);
6026       default:
6027 	break;
6028       }
6029 
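  /* For example, (if_then_else (gt X (const_int 0)) X (neg X)) becomes
     (abs X) and (if_then_else (lt X (const_int 0)) X (neg X)) becomes
     (neg (abs X)); both are just instances of the cases above.  */
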
6030   /* Look for MIN or MAX.  */
6031 
6032   if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6033       && comparison_p
6034       && rtx_equal_p (XEXP (cond, 0), true_rtx)
6035       && rtx_equal_p (XEXP (cond, 1), false_rtx)
6036       && ! side_effects_p (cond))
6037     switch (true_code)
6038       {
6039       case GE:
6040       case GT:
6041 	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6042       case LE:
6043       case LT:
6044 	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6045       case GEU:
6046       case GTU:
6047 	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6048       case LEU:
6049       case LTU:
6050 	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6051       default:
6052 	break;
6053       }
6054 
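  /* E.g., (if_then_else (ge A B) A B) becomes (smax A B) and
     (if_then_else (ltu A B) A B) becomes (umin A B); illustrative
     instances of the mapping above.  */
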
6055   /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6056      second operand is zero, this can be done as (OP Z (mult COND C2)) where
6057      C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6058      SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6059      We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6060      neither 1 or -1, but it isn't worth checking for.  */
6061      neither 1 nor -1, but it isn't worth checking for.  */
6062   if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6063       && comparison_p
6064       && GET_MODE_CLASS (mode) == MODE_INT
6065       && ! side_effects_p (x))
6066     {
6067       rtx t = make_compound_operation (true_rtx, SET);
6068       rtx f = make_compound_operation (false_rtx, SET);
6069       rtx cond_op0 = XEXP (cond, 0);
6070       rtx cond_op1 = XEXP (cond, 1);
6071       enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6072       enum machine_mode m = mode;
6073       rtx z = 0, c1 = NULL_RTX;
6074 
6075       if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6076 	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6077 	   || GET_CODE (t) == ASHIFT
6078 	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6079 	  && rtx_equal_p (XEXP (t, 0), f))
6080 	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6081 
6082       /* If an identity-zero op is commutative, check whether there
6083 	 would be a match if we swapped the operands.  */
6084       else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6085 		|| GET_CODE (t) == XOR)
6086 	       && rtx_equal_p (XEXP (t, 1), f))
6087 	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6088       else if (GET_CODE (t) == SIGN_EXTEND
6089 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6090 		   || GET_CODE (XEXP (t, 0)) == MINUS
6091 		   || GET_CODE (XEXP (t, 0)) == IOR
6092 		   || GET_CODE (XEXP (t, 0)) == XOR
6093 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6094 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6095 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6096 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6097 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6098 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6099 	       && (num_sign_bit_copies (f, GET_MODE (f))
6100 		   > (unsigned int)
6101 		     (GET_MODE_PRECISION (mode)
6102 		      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6103 	{
6104 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6105 	  extend_op = SIGN_EXTEND;
6106 	  m = GET_MODE (XEXP (t, 0));
6107 	}
6108       else if (GET_CODE (t) == SIGN_EXTEND
6109 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6110 		   || GET_CODE (XEXP (t, 0)) == IOR
6111 		   || GET_CODE (XEXP (t, 0)) == XOR)
6112 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6113 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6114 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6115 	       && (num_sign_bit_copies (f, GET_MODE (f))
6116 		   > (unsigned int)
6117 		     (GET_MODE_PRECISION (mode)
6118 		      - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6119 	{
6120 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6121 	  extend_op = SIGN_EXTEND;
6122 	  m = GET_MODE (XEXP (t, 0));
6123 	}
6124       else if (GET_CODE (t) == ZERO_EXTEND
6125 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6126 		   || GET_CODE (XEXP (t, 0)) == MINUS
6127 		   || GET_CODE (XEXP (t, 0)) == IOR
6128 		   || GET_CODE (XEXP (t, 0)) == XOR
6129 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6130 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6131 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6132 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6133 	       && HWI_COMPUTABLE_MODE_P (mode)
6134 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6135 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6136 	       && ((nonzero_bits (f, GET_MODE (f))
6137 		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6138 		   == 0))
6139 	{
6140 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6141 	  extend_op = ZERO_EXTEND;
6142 	  m = GET_MODE (XEXP (t, 0));
6143 	}
6144       else if (GET_CODE (t) == ZERO_EXTEND
6145 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6146 		   || GET_CODE (XEXP (t, 0)) == IOR
6147 		   || GET_CODE (XEXP (t, 0)) == XOR)
6148 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6149 	       && HWI_COMPUTABLE_MODE_P (mode)
6150 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6151 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6152 	       && ((nonzero_bits (f, GET_MODE (f))
6153 		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6154 		   == 0))
6155 	{
6156 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6157 	  extend_op = ZERO_EXTEND;
6158 	  m = GET_MODE (XEXP (t, 0));
6159 	}
6160 
6161       if (z)
6162 	{
6163 	  temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6164 						 cond_op0, cond_op1),
6165 			pc_rtx, pc_rtx, 0, 0, 0);
6166 	  temp = simplify_gen_binary (MULT, m, temp,
6167 				      simplify_gen_binary (MULT, m, c1,
6168 							   const_true_rtx));
6169 	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6170 	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6171 
6172 	  if (extend_op != UNKNOWN)
6173 	    temp = simplify_gen_unary (extend_op, mode, temp, m);
6174 
6175 	  return temp;
6176 	}
6177     }
6178 
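  /* As a sketch of the transformation above, assuming STORE_FLAG_VALUE == 1:
     (if_then_else (ne A (const_int 0)) (plus Z (const_int 4)) Z) can become
     (plus Z (mult (ne A (const_int 0)) (const_int 4))), since the comparison
     contributes 1 when true and 0 when false.  */
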
6179   /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6180      1 and C1 is a single bit, or A is known to be 0 or -1 and C1 is the
6181      negation of a single bit, we can convert this operation to a shift.  We
6182      can actually do this more generally, but it doesn't seem worth it.  */
6183 
6184   if (true_code == NE && XEXP (cond, 1) == const0_rtx
6185       && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6186       && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6187 	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6188 	  || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6189 	       == GET_MODE_PRECISION (mode))
6190 	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6191     return
6192       simplify_shift_const (NULL_RTX, ASHIFT, mode,
6193 			    gen_lowpart (mode, XEXP (cond, 0)), i);
6194 
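  /* For example, if A is known to be 0 or 1,
     (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
     becomes (ashift A (const_int 3)); an illustrative case only.  */
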
6195   /* (IF_THEN_ELSE (NE REG 0) (8) (0)) is REG for nonzero_bits (REG) == 8.  */
6196   if (true_code == NE && XEXP (cond, 1) == const0_rtx
6197       && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6198       && GET_MODE (XEXP (cond, 0)) == mode
6199       && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6200 	  == nonzero_bits (XEXP (cond, 0), mode)
6201       && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6202     return XEXP (cond, 0);
6203 
6204   return x;
6205 }
6206 
6207 /* Simplify X, a SET expression.  Return the new expression.  */
6208 
6209 static rtx
6210 simplify_set (rtx x)
6211 {
6212   rtx src = SET_SRC (x);
6213   rtx dest = SET_DEST (x);
6214   enum machine_mode mode
6215     = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6216   rtx other_insn;
6217   rtx *cc_use;
6218 
6219   /* (set (pc) (return)) gets written as (return).  */
6220   if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6221     return src;
6222 
6223   /* Now that we know for sure which bits of SRC we are using, see if we can
6224      simplify the expression for the object knowing that we only need the
6225      low-order bits.  */
6226 
6227   if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6228     {
6229       src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6230       SUBST (SET_SRC (x), src);
6231     }
6232 
6233   /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6234      the comparison result and try to simplify it unless we already have used
6235      undobuf.other_insn.  */
6236   if ((GET_MODE_CLASS (mode) == MODE_CC
6237        || GET_CODE (src) == COMPARE
6238        || CC0_P (dest))
6239       && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6240       && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6241       && COMPARISON_P (*cc_use)
6242       && rtx_equal_p (XEXP (*cc_use, 0), dest))
6243     {
6244       enum rtx_code old_code = GET_CODE (*cc_use);
6245       enum rtx_code new_code;
6246       rtx op0, op1, tmp;
6247       int other_changed = 0;
6248       rtx inner_compare = NULL_RTX;
6249       enum machine_mode compare_mode = GET_MODE (dest);
6250 
6251       if (GET_CODE (src) == COMPARE)
6252 	{
6253 	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6254 	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6255 	    {
6256 	      inner_compare = op0;
6257 	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6258 	    }
6259 	}
6260       else
6261 	op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6262 
6263       tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6264 					   op0, op1);
6265       if (!tmp)
6266 	new_code = old_code;
6267       else if (!CONSTANT_P (tmp))
6268 	{
6269 	  new_code = GET_CODE (tmp);
6270 	  op0 = XEXP (tmp, 0);
6271 	  op1 = XEXP (tmp, 1);
6272 	}
6273       else
6274 	{
6275 	  rtx pat = PATTERN (other_insn);
6276 	  undobuf.other_insn = other_insn;
6277 	  SUBST (*cc_use, tmp);
6278 
6279 	  /* Attempt to simplify CC user.  */
6280 	  if (GET_CODE (pat) == SET)
6281 	    {
6282 	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
6283 	      if (new_rtx != NULL_RTX)
6284 		SUBST (SET_SRC (pat), new_rtx);
6285 	    }
6286 
6287 	  /* Convert X into a no-op move.  */
6288 	  SUBST (SET_DEST (x), pc_rtx);
6289 	  SUBST (SET_SRC (x), pc_rtx);
6290 	  return x;
6291 	}
6292 
6293       /* Simplify our comparison, if possible.  */
6294       new_code = simplify_comparison (new_code, &op0, &op1);
6295 
6296 #ifdef SELECT_CC_MODE
6297       /* If this machine has CC modes other than CCmode, check to see if we
6298 	 need to use a different CC mode here.  */
6299       if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6300 	compare_mode = GET_MODE (op0);
6301       else if (inner_compare
6302 	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6303 	       && new_code == old_code
6304 	       && op0 == XEXP (inner_compare, 0)
6305 	       && op1 == XEXP (inner_compare, 1))
6306 	compare_mode = GET_MODE (inner_compare);
6307       else
6308 	compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6309 
6310 #ifndef HAVE_cc0
6311       /* If the mode changed, we have to change SET_DEST, the mode in the
6312 	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
6313 	 a hard register, just build new versions with the proper mode.  If it
6314 	 is a pseudo, we lose unless it is the only time we set the pseudo, in
6315 	 which case we can safely change its mode.  */
6316       if (compare_mode != GET_MODE (dest))
6317 	{
6318 	  if (can_change_dest_mode (dest, 0, compare_mode))
6319 	    {
6320 	      unsigned int regno = REGNO (dest);
6321 	      rtx new_dest;
6322 
6323 	      if (regno < FIRST_PSEUDO_REGISTER)
6324 		new_dest = gen_rtx_REG (compare_mode, regno);
6325 	      else
6326 		{
6327 		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6328 		  new_dest = regno_reg_rtx[regno];
6329 		}
6330 
6331 	      SUBST (SET_DEST (x), new_dest);
6332 	      SUBST (XEXP (*cc_use, 0), new_dest);
6333 	      other_changed = 1;
6334 
6335 	      dest = new_dest;
6336 	    }
6337 	}
6338 #endif  /* cc0 */
6339 #endif  /* SELECT_CC_MODE */
6340 
6341       /* If the code changed, we have to build a new comparison in
6342 	 undobuf.other_insn.  */
6343       if (new_code != old_code)
6344 	{
6345 	  int other_changed_previously = other_changed;
6346 	  unsigned HOST_WIDE_INT mask;
6347 	  rtx old_cc_use = *cc_use;
6348 
6349 	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6350 					  dest, const0_rtx));
6351 	  other_changed = 1;
6352 
6353 	  /* If the only change we made was to change an EQ into an NE or
6354 	     vice versa, OP0 has only one bit that might be nonzero, and OP1
6355 	     is zero, check if changing the user of the condition code will
6356 	     produce a valid insn.  If it won't, we can keep the original code
6357 	     in that insn by surrounding our operation with an XOR.  */
6358 
6359 	  if (((old_code == NE && new_code == EQ)
6360 	       || (old_code == EQ && new_code == NE))
6361 	      && ! other_changed_previously && op1 == const0_rtx
6362 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6363 	      && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6364 	    {
6365 	      rtx pat = PATTERN (other_insn), note = 0;
6366 
6367 	      if ((recog_for_combine (&pat, other_insn, &note) < 0
6368 		   && ! check_asm_operands (pat)))
6369 		{
6370 		  *cc_use = old_cc_use;
6371 		  other_changed = 0;
6372 
6373 		  op0 = simplify_gen_binary (XOR, GET_MODE (op0),
6374 					     op0, GEN_INT (mask));
6375 		}
6376 	    }
6377 	}
6378 
6379       if (other_changed)
6380 	undobuf.other_insn = other_insn;
6381 
6382       /* Otherwise, if we didn't previously have a COMPARE in the
6383 	 correct mode, we need one.  */
6384       if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
6385 	{
6386 	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6387 	  src = SET_SRC (x);
6388 	}
6389       else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6390 	{
6391 	  SUBST (SET_SRC (x), op0);
6392 	  src = SET_SRC (x);
6393 	}
6394       /* Otherwise, update the COMPARE if needed.  */
6395       else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6396 	{
6397 	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6398 	  src = SET_SRC (x);
6399 	}
6400     }
6401   else
6402     {
6403       /* Get SET_SRC in a form where we have placed back any
6404 	 compound expressions.  Then do the checks below.  */
6405       src = make_compound_operation (src, SET);
6406       SUBST (SET_SRC (x), src);
6407     }
6408 
6409   /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6410      and X being a REG or (subreg (reg)), we may be able to convert this to
6411      (set (subreg:m2 x) (op)).
6412 
6413      We can always do this if M1 is narrower than M2 because that means that
6414      we only care about the low bits of the result.
6415 
6416      However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6417      perform a narrower operation than requested since the high-order bits will
6418      be undefined.  On machines where it is defined, this transformation is safe
6419      as long as M1 and M2 have the same number of words.  */
6420 
6421   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6422       && !OBJECT_P (SUBREG_REG (src))
6423       && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6424 	   / UNITS_PER_WORD)
6425 	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6426 	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6427 #ifndef WORD_REGISTER_OPERATIONS
6428       && (GET_MODE_SIZE (GET_MODE (src))
6429 	< GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
6430 #endif
6431 #ifdef CANNOT_CHANGE_MODE_CLASS
6432       && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6433 	    && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6434 					 GET_MODE (SUBREG_REG (src)),
6435 					 GET_MODE (src)))
6436 #endif
6437       && (REG_P (dest)
6438 	  || (GET_CODE (dest) == SUBREG
6439 	      && REG_P (SUBREG_REG (dest)))))
6440     {
6441       SUBST (SET_DEST (x),
6442 	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
6443 				      dest));
6444       SUBST (SET_SRC (x), SUBREG_REG (src));
6445 
6446       src = SET_SRC (x), dest = SET_DEST (x);
6447     }
6448 
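  /* As an illustration, on a 32-bit target where QImode and SImode occupy
     one word each, (set (reg:QI R) (subreg:QI (plus:SI A B) 0)) can become
     (set (subreg:SI (reg:QI R) 0) (plus:SI A B)).  */
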
6449 #ifdef HAVE_cc0
6450   /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6451      in SRC.  */
6452   if (dest == cc0_rtx
6453       && GET_CODE (src) == SUBREG
6454       && subreg_lowpart_p (src)
6455       && (GET_MODE_PRECISION (GET_MODE (src))
6456 	  < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6457     {
6458       rtx inner = SUBREG_REG (src);
6459       enum machine_mode inner_mode = GET_MODE (inner);
6460 
6461       /* Here we make sure that the sign bit is not set.  */
6462       if (val_signbit_known_clear_p (GET_MODE (src),
6463 				     nonzero_bits (inner, inner_mode)))
6464 	{
6465 	  SUBST (SET_SRC (x), inner);
6466 	  src = SET_SRC (x);
6467 	}
6468     }
6469 #endif
6470 
6471 #ifdef LOAD_EXTEND_OP
6472   /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6473      would require a paradoxical subreg.  Replace the subreg with a
6474      zero_extend to avoid the reload that would otherwise be required.  */
6475 
6476   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6477       && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6478       && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6479       && SUBREG_BYTE (src) == 0
6480       && paradoxical_subreg_p (src)
6481       && MEM_P (SUBREG_REG (src)))
6482     {
6483       SUBST (SET_SRC (x),
6484 	     gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6485 			    GET_MODE (src), SUBREG_REG (src)));
6486 
6487       src = SET_SRC (x);
6488     }
6489 #endif
6490 
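  /* For instance, on a target whose LOAD_EXTEND_OP (QImode) is ZERO_EXTEND,
     (set FOO (subreg:SI (mem:QI ADDR) 0)) is rewritten here as
     (set FOO (zero_extend:SI (mem:QI ADDR))).  */
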
6491   /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6492      are comparing an item known to be 0 or -1 against 0, use a logical
6493      operation instead. Check for one of the arms being an IOR of the other
6494      arm with some value.  We compute three terms to be IOR'ed together.  In
6495      practice, at most two will be nonzero.  Then we do the IOR's.  */
6496 
6497   if (GET_CODE (dest) != PC
6498       && GET_CODE (src) == IF_THEN_ELSE
6499       && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6500       && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6501       && XEXP (XEXP (src, 0), 1) == const0_rtx
6502       && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6503 #ifdef HAVE_conditional_move
6504       && ! can_conditionally_move_p (GET_MODE (src))
6505 #endif
6506       && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6507 			       GET_MODE (XEXP (XEXP (src, 0), 0)))
6508 	  == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6509       && ! side_effects_p (src))
6510     {
6511       rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6512 		      ? XEXP (src, 1) : XEXP (src, 2));
6513       rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6514 		   ? XEXP (src, 2) : XEXP (src, 1));
6515       rtx term1 = const0_rtx, term2, term3;
6516 
6517       if (GET_CODE (true_rtx) == IOR
6518 	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6519 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6520       else if (GET_CODE (true_rtx) == IOR
6521 	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6522 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6523       else if (GET_CODE (false_rtx) == IOR
6524 	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6525 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6526       else if (GET_CODE (false_rtx) == IOR
6527 	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6528 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6529 
6530       term2 = simplify_gen_binary (AND, GET_MODE (src),
6531 				   XEXP (XEXP (src, 0), 0), true_rtx);
6532       term3 = simplify_gen_binary (AND, GET_MODE (src),
6533 				   simplify_gen_unary (NOT, GET_MODE (src),
6534 						       XEXP (XEXP (src, 0), 0),
6535 						       GET_MODE (src)),
6536 				   false_rtx);
6537 
6538       SUBST (SET_SRC (x),
6539 	     simplify_gen_binary (IOR, GET_MODE (src),
6540 				  simplify_gen_binary (IOR, GET_MODE (src),
6541 						       term1, term2),
6542 				  term3));
6543 
6544       src = SET_SRC (x);
6545     }
6546 
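  /* To illustrate the conversion above: when A is known to be 0 or -1 and
     neither arm is an IOR of the other, (if_then_else (ne A (const_int 0)) B C)
     becomes (ior (and A B) (and (not A) C)), the zero TERM1 having been
     folded away by simplify_gen_binary.  */
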
6547   /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6548      whole thing fail.  */
6549   if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6550     return src;
6551   else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6552     return dest;
6553   else
6554     /* Convert this into a field assignment operation, if possible.  */
6555     return make_field_assignment (x);
6556 }
6557 
6558 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6559    result.  */
6560 
6561 static rtx
6562 simplify_logical (rtx x)
6563 {
6564   enum machine_mode mode = GET_MODE (x);
6565   rtx op0 = XEXP (x, 0);
6566   rtx op1 = XEXP (x, 1);
6567 
6568   switch (GET_CODE (x))
6569     {
6570     case AND:
6571       /* We can call simplify_and_const_int only if we don't lose
6572 	 any (sign) bits when converting INTVAL (op1) to
6573 	 "unsigned HOST_WIDE_INT".  */
6574       if (CONST_INT_P (op1)
6575 	  && (HWI_COMPUTABLE_MODE_P (mode)
6576 	      || INTVAL (op1) > 0))
6577 	{
6578 	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6579 	  if (GET_CODE (x) != AND)
6580 	    return x;
6581 
6582 	  op0 = XEXP (x, 0);
6583 	  op1 = XEXP (x, 1);
6584 	}
6585 
6586       /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6587 	 apply the distributive law and then the inverse distributive
6588 	 law to see if things simplify.  */
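      /* E.g., (and (ior A B) C) distributes to (ior (and A C) (and B C));
	 distribute_and_simplify_rtx keeps the distributed form only when
	 it turns out to be cheaper.  */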
6589       if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6590 	{
6591 	  rtx result = distribute_and_simplify_rtx (x, 0);
6592 	  if (result)
6593 	    return result;
6594 	}
6595       if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6596 	{
6597 	  rtx result = distribute_and_simplify_rtx (x, 1);
6598 	  if (result)
6599 	    return result;
6600 	}
6601       break;
6602 
6603     case IOR:
6604       /* If we have (ior (and A B) C), apply the distributive law and then
6605 	 the inverse distributive law to see if things simplify.  */
6606 
6607       if (GET_CODE (op0) == AND)
6608 	{
6609 	  rtx result = distribute_and_simplify_rtx (x, 0);
6610 	  if (result)
6611 	    return result;
6612 	}
6613 
6614       if (GET_CODE (op1) == AND)
6615 	{
6616 	  rtx result = distribute_and_simplify_rtx (x, 1);
6617 	  if (result)
6618 	    return result;
6619 	}
6620       break;
6621 
6622     default:
6623       gcc_unreachable ();
6624     }
6625 
6626   return x;
6627 }
6628 
6629 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6630    operations" because they can be replaced with two more basic operations.
6631    ZERO_EXTEND is also considered "compound" because it can be replaced with
6632    an AND operation, which is simpler, though only one operation.
6633 
6634    The function expand_compound_operation is called with an rtx expression
6635    and will convert it to the appropriate shifts and AND operations,
6636    simplifying at each stage.
6637 
6638    The function make_compound_operation is called to convert an expression
6639    consisting of shifts and ANDs into the equivalent compound expression.
6640    It is the inverse of this function, loosely speaking.  */
6641 
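/* As a concrete illustration: conceptually, (zero_extend:SI (reg:QI R)) is
   equivalent to (and:SI (subreg:SI (reg:QI R) 0) (const_int 255)), while
   (sign_extend:SI (reg:QI R)) requires the pair of shifts produced near
   the end of expand_compound_operation below.  */
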
6642 static rtx
6643 expand_compound_operation (rtx x)
6644 {
6645   unsigned HOST_WIDE_INT pos = 0, len;
6646   int unsignedp = 0;
6647   unsigned int modewidth;
6648   rtx tem;
6649 
6650   switch (GET_CODE (x))
6651     {
6652     case ZERO_EXTEND:
6653       unsignedp = 1;
6654     case SIGN_EXTEND:
6655       /* We can't necessarily use a const_int for a multiword mode;
6656 	 it depends on implicitly extending the value.
6657 	 Since we don't know the right way to extend it,
6658 	 we can't tell whether the implicit way is right.
6659 
6660 	 Even for a mode that is no wider than a const_int,
6661 	 we can't win, because we need to sign extend one of its bits through
6662 	 the rest of it, and we don't know which bit.  */
6663       if (CONST_INT_P (XEXP (x, 0)))
6664 	return x;
6665 
6666       /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6667 	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is safe for any MEM
6668 	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6669 	 reloaded.  If not for that, MEMs would very rarely be safe.
6670 
6671 	 Reject MODEs bigger than a word, because we might not be able
6672 	 to reference a two-register group starting with an arbitrary register
6673 	 (and currently gen_lowpart might crash for a SUBREG).  */
6674 
6675       if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6676 	return x;
6677 
6678       /* Reject MODEs that aren't scalar integers because turning vector
6679 	 or complex modes into shifts causes problems.  */
6680 
6681       if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6682 	return x;
6683 
6684       len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
6685       /* If the inner object has VOIDmode (the only way this can happen
6686 	 is if it is an ASM_OPERANDS), we can't do anything since we don't
6687 	 know how much masking to do.  */
6688       if (len == 0)
6689 	return x;
6690 
6691       break;
6692 
6693     case ZERO_EXTRACT:
6694       unsignedp = 1;
6695 
6696       /* ... fall through ...  */
6697 
6698     case SIGN_EXTRACT:
6699       /* If the operand is a CLOBBER, just return it.  */
6700       if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6701 	return XEXP (x, 0);
6702 
6703       if (!CONST_INT_P (XEXP (x, 1))
6704 	  || !CONST_INT_P (XEXP (x, 2))
6705 	  || GET_MODE (XEXP (x, 0)) == VOIDmode)
6706 	return x;
6707 
6708       /* Reject MODEs that aren't scalar integers because turning vector
6709 	 or complex modes into shifts causes problems.  */
6710 
6711       if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6712 	return x;
6713 
6714       len = INTVAL (XEXP (x, 1));
6715       pos = INTVAL (XEXP (x, 2));
6716 
6717       /* This should stay within the object being extracted, fail otherwise.  */
6718       if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
6719 	return x;
6720 
6721       if (BITS_BIG_ENDIAN)
6722 	pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
6723 
6724       break;
6725 
6726     default:
6727       return x;
6728     }
6729   /* Convert sign extension to zero extension, if we know that the high
6730      bit is not set, as this is easier to optimize.  It will be converted
6731      back to the cheaper alternative in make_extraction.  */
6732   if (GET_CODE (x) == SIGN_EXTEND
6733       && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6734 	  && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
6735 		& ~(((unsigned HOST_WIDE_INT)
6736 		      GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
6737 		     >> 1))
6738 	       == 0)))
6739     {
6740       rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
6741       rtx temp2 = expand_compound_operation (temp);
6742 
6743       /* Make sure this is a profitable operation.  */
6744       if (set_src_cost (x, optimize_this_for_speed_p)
6745           > set_src_cost (temp2, optimize_this_for_speed_p))
6746        return temp2;
6747       else if (set_src_cost (x, optimize_this_for_speed_p)
6748                > set_src_cost (temp, optimize_this_for_speed_p))
6749        return temp;
6750       else
6751        return x;
6752     }
6753 
6754   /* We can optimize some special cases of ZERO_EXTEND.  */
6755   if (GET_CODE (x) == ZERO_EXTEND)
6756     {
6757       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
6758 	 know that the last value didn't have any inappropriate bits
6759 	 set.  */
6760       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6761 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6762 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6763 	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
6764 	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6765 	return XEXP (XEXP (x, 0), 0);
6766 
6767       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
6768       if (GET_CODE (XEXP (x, 0)) == SUBREG
6769 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6770 	  && subreg_lowpart_p (XEXP (x, 0))
6771 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
6772 	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
6773 	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6774 	return SUBREG_REG (XEXP (x, 0));
6775 
6776       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
6777 	 is a comparison and STORE_FLAG_VALUE permits.  This is like
6778 	 the first case, but it works even when GET_MODE (x) is larger
6779 	 than HOST_WIDE_INT.  */
6780       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
6781 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
6782 	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
6783 	  && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
6784 	      <= HOST_BITS_PER_WIDE_INT)
6785 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6786 	return XEXP (XEXP (x, 0), 0);
6787 
6788       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
6789       if (GET_CODE (XEXP (x, 0)) == SUBREG
6790 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
6791 	  && subreg_lowpart_p (XEXP (x, 0))
6792 	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
6793 	  && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
6794 	      <= HOST_BITS_PER_WIDE_INT)
6795 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
6796 	return SUBREG_REG (XEXP (x, 0));
6797 
6798     }
6799 
6800   /* If we reach here, we want to return a pair of shifts.  The inner
6801      shift is a left shift of BITSIZE - POS - LEN bits.  The outer
6802      shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
6803      logical depending on the value of UNSIGNEDP.
6804 
6805      If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
6806      converted into an AND of a shift.
6807 
6808      We must check for the case where the left shift would have a negative
6809      count.  This can happen in a case like (x >> 31) & 255 on machines
6810      that can't shift by a constant.  On those machines, we would first
6811      combine the shift with the AND to produce a variable-position
6812      extraction.  Then the constant of 31 would be substituted in
6813      to produce such a position.  */
6814 
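  /* For instance, ignoring any BITS_BIG_ENDIAN adjustment,
     (sign_extract:SI X (const_int 8) (const_int 0)) becomes
     (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)): the inner
     shift moves the field to the top of the word and the arithmetic right
     shift brings it back with the sign replicated.  */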
6815   modewidth = GET_MODE_PRECISION (GET_MODE (x));
6816   if (modewidth >= pos + len)
6817     {
6818       enum machine_mode mode = GET_MODE (x);
6819       tem = gen_lowpart (mode, XEXP (x, 0));
6820       if (!tem || GET_CODE (tem) == CLOBBER)
6821 	return x;
6822       tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6823 				  tem, modewidth - pos - len);
6824       tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
6825 				  mode, tem, modewidth - len);
6826     }
6827   else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
6828     tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
6829 				  simplify_shift_const (NULL_RTX, LSHIFTRT,
6830 							GET_MODE (x),
6831 							XEXP (x, 0), pos),
6832 				  ((unsigned HOST_WIDE_INT) 1 << len) - 1);
6833   else
6834     /* Any other cases we can't handle.  */
6835     return x;
6836 
6837   /* If we couldn't do this for some reason, return the original
6838      expression.  */
6839   if (GET_CODE (tem) == CLOBBER)
6840     return x;
6841 
6842   return tem;
6843 }
6844 
6845 /* X is a SET which contains an assignment of one object into
6846    a part of another (such as a bit-field assignment, STRICT_LOW_PART,
6847    or certain SUBREGS). If possible, convert it into a series of
6848    logical operations.
6849 
6850    We half-heartedly support variable positions, but do not at all
6851    support variable lengths.  */
6852 
6853 static const_rtx
6854 expand_field_assignment (const_rtx x)
6855 {
6856   rtx inner;
6857   rtx pos;			/* Always counts from low bit.  */
6858   int len;
6859   rtx mask, cleared, masked;
6860   enum machine_mode compute_mode;
6861 
6862   /* Loop until we find something we can't simplify.  */
6863   while (1)
6864     {
6865       if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
6866 	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
6867 	{
6868 	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
6869 	  len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
6870 	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
6871 	}
6872       else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
6873 	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
6874 	{
6875 	  inner = XEXP (SET_DEST (x), 0);
6876 	  len = INTVAL (XEXP (SET_DEST (x), 1));
6877 	  pos = XEXP (SET_DEST (x), 2);
6878 
6879 	  /* A constant position should stay within the width of INNER.  */
6880 	  if (CONST_INT_P (pos)
6881 	      && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
6882 	    break;
6883 
6884 	  if (BITS_BIG_ENDIAN)
6885 	    {
6886 	      if (CONST_INT_P (pos))
6887 		pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
6888 			       - INTVAL (pos));
6889 	      else if (GET_CODE (pos) == MINUS
6890 		       && CONST_INT_P (XEXP (pos, 1))
6891 		       && (INTVAL (XEXP (pos, 1))
6892 			   == GET_MODE_PRECISION (GET_MODE (inner)) - len))
6893 		/* If position is ADJUST - X, new position is X.  */
6894 		pos = XEXP (pos, 0);
6895 	      else
6896 		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
6897 					   GEN_INT (GET_MODE_PRECISION (
6898 						    GET_MODE (inner))
6899 						    - len),
6900 					   pos);
6901 	    }
6902 	}
6903 
6904       /* A SUBREG between two modes that occupy the same numbers of words
6905 	 can be done by moving the SUBREG to the source.  */
6906       else if (GET_CODE (SET_DEST (x)) == SUBREG
6907 	       /* We need SUBREGs to compute nonzero_bits properly.  */
6908 	       && nonzero_sign_valid
6909 	       && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
6910 		     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
6911 		   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
6912 			+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
6913 	{
6914 	  x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
6915 			   gen_lowpart
6916 			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
6917 			    SET_SRC (x)));
6918 	  continue;
6919 	}
6920       else
6921 	break;
6922 
6923       while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
6924 	inner = SUBREG_REG (inner);
6925 
6926       compute_mode = GET_MODE (inner);
6927 
6928       /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
6929       /* Don't attempt bitwise arithmetic on non-scalar integer modes.  */
6930 	{
6931 	  enum machine_mode imode;
6932 
6933 	  /* Don't do anything for vector or complex integral types.  */
6934 	  if (! FLOAT_MODE_P (compute_mode))
6935 	    break;
6936 
6937 	  /* Try to find an integral mode to pun with.  */
6938 	  imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
6939 	  if (imode == BLKmode)
6940 	    break;
6941 
6942 	  compute_mode = imode;
6943 	  inner = gen_lowpart (imode, inner);
6944 	}
6945 
6946       /* Compute a mask of LEN bits, if we can do this on the host machine.  */
6947       if (len >= HOST_BITS_PER_WIDE_INT)
6948 	break;
6949 
6950       /* Now compute the equivalent expression.  Make a copy of INNER
6951 	 for the SET_DEST in case it is a MEM into which we will substitute;
6952 	 we don't want shared RTL in that case.  */
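      /* The expression built below has the shape
	 (set INNER (ior (and (not (ashift MASK POS)) INNER)
			 (ashift (and SRC MASK) POS)))
	 with MASK == (1 << LEN) - 1 and SRC the low part of SET_SRC:
	 the old field is cleared, then the masked source is shifted
	 into place and IORed in.  */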
6953       mask = GEN_INT (((unsigned HOST_WIDE_INT) 1 << len) - 1);
6954       cleared = simplify_gen_binary (AND, compute_mode,
6955 				     simplify_gen_unary (NOT, compute_mode,
6956 				       simplify_gen_binary (ASHIFT,
6957 							    compute_mode,
6958 							    mask, pos),
6959 				       compute_mode),
6960 				     inner);
6961       masked = simplify_gen_binary (ASHIFT, compute_mode,
6962 				    simplify_gen_binary (
6963 				      AND, compute_mode,
6964 				      gen_lowpart (compute_mode, SET_SRC (x)),
6965 				      mask),
6966 				    pos);
6967 
6968       x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
6969 		       simplify_gen_binary (IOR, compute_mode,
6970 					    cleared, masked));
6971     }
6972 
6973   return x;
6974 }
6975 
6976 /* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
6977    it is an RTX that represents the (variable) starting position; otherwise,
6978    POS is the (constant) starting bit position.  Both are counted from the LSB.
6979 
6980    UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
6981 
6982    IN_DEST is nonzero if this is a reference in the destination of a SET.
6983    This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
6984    a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
6985    be used.
6986 
6987    IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
6988    ZERO_EXTRACT should be built even for bits starting at bit 0.
6989 
6990    MODE is the desired mode of the result (if IN_DEST == 0).
6991 
6992    The result is an RTX for the extraction or NULL_RTX if the target
6993    can't handle it.  */
6994 
6995 static rtx
6996 make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
6997 		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
6998 		 int in_dest, int in_compare)
6999 {
7000   /* This mode describes the size of the storage area
7001      to fetch the overall value from.  Within that, we
7002      ignore the POS lowest bits, etc.  */
7003   enum machine_mode is_mode = GET_MODE (inner);
7004   enum machine_mode inner_mode;
7005   enum machine_mode wanted_inner_mode;
7006   enum machine_mode wanted_inner_reg_mode = word_mode;
7007   enum machine_mode pos_mode = word_mode;
7008   enum machine_mode extraction_mode = word_mode;
7009   enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
7010   rtx new_rtx = 0;
7011   rtx orig_pos_rtx = pos_rtx;
7012   HOST_WIDE_INT orig_pos;
7013 
7014   if (pos_rtx && CONST_INT_P (pos_rtx))
7015     pos = INTVAL (pos_rtx), pos_rtx = 0;
7016 
7017   if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7018     {
7019       /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7020 	 consider just the QI as the memory to extract from.
7021 	 The subreg adds or removes high bits; its mode is
7022 	 irrelevant to the meaning of this extraction,
7023 	 since POS and LEN count from the lsb.  */
7024       if (MEM_P (SUBREG_REG (inner)))
7025 	is_mode = GET_MODE (SUBREG_REG (inner));
7026       inner = SUBREG_REG (inner);
7027     }
7028   else if (GET_CODE (inner) == ASHIFT
7029 	   && CONST_INT_P (XEXP (inner, 1))
7030 	   && pos_rtx == 0 && pos == 0
7031 	   && len > UINTVAL (XEXP (inner, 1)))
7032     {
7033       /* We're extracting the least significant bits of an rtx
7034 	 (ashift X (const_int C)), where LEN > C.  Extract the
7035 	 least significant (LEN - C) bits of X, giving an rtx
7036 	 whose mode is MODE, then shift it left C times.  */
7037       new_rtx = make_extraction (mode, XEXP (inner, 0),
7038 			     0, 0, len - INTVAL (XEXP (inner, 1)),
7039 			     unsignedp, in_dest, in_compare);
7040       if (new_rtx != 0)
7041 	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7042     }
7043   else if (GET_CODE (inner) == TRUNCATE)
7044     inner = XEXP (inner, 0);
7045 
7046   inner_mode = GET_MODE (inner);
7047 
7048   /* See if this can be done without an extraction.  We never can if the
7049      width of the field is not the same as that of some integer mode. For
7050      registers, we can only avoid the extraction if the position is at the
7051      low-order bit and this is either not in the destination or we have the
7052      appropriate STRICT_LOW_PART operation available.
7053 
7054      For MEM, we can avoid an extract if the field starts on an appropriate
7055      boundary and we can change the mode of the memory reference.  */
7056 
7057   if (tmode != BLKmode
7058       && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7059 	   && !MEM_P (inner)
7060 	   && (inner_mode == tmode
7061 	       || !REG_P (inner)
7062 	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7063 	       || reg_truncated_to_mode (tmode, inner))
7064 	   && (! in_dest
7065 	       || (REG_P (inner)
7066 		   && have_insn_for (STRICT_LOW_PART, tmode))))
7067 	  || (MEM_P (inner) && pos_rtx == 0
7068 	      && (pos
7069 		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7070 		     : BITS_PER_UNIT)) == 0
7071 	      /* We can't do this if we are widening INNER_MODE (it
7072 		 may not be aligned, for one thing).  */
7073 	      && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7074 	      && (inner_mode == tmode
7075 		  || (! mode_dependent_address_p (XEXP (inner, 0),
7076 						  MEM_ADDR_SPACE (inner))
7077 		      && ! MEM_VOLATILE_P (inner))))))
7078     {
7079       /* If INNER is a MEM, make a new MEM that encompasses just the desired
7080 	 field.  If the original and current mode are the same, we need not
7081 	 adjust the offset.  Otherwise, we do if bytes big endian.
7082 
7083 	 If INNER is not a MEM, get a piece consisting of just the field
7084 	 of interest (in this case POS % BITS_PER_WORD must be 0).  */
7085 
7086       if (MEM_P (inner))
7087 	{
7088 	  HOST_WIDE_INT offset;
7089 
7090 	  /* POS counts from lsb, but make OFFSET count in memory order.  */
7091 	  if (BYTES_BIG_ENDIAN)
7092 	    offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7093 	  else
7094 	    offset = pos / BITS_PER_UNIT;
7095 
7096 	  new_rtx = adjust_address_nv (inner, tmode, offset);
7097 	}
7098       else if (REG_P (inner))
7099 	{
7100 	  if (tmode != inner_mode)
7101 	    {
7102 	      /* We can't call gen_lowpart in a DEST since we
7103 		 always want a SUBREG (see below) and it would sometimes
7104 		 return a new hard register.  */
7105 	      if (pos || in_dest)
7106 		{
7107 		  HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7108 
7109 		  if (WORDS_BIG_ENDIAN
7110 		      && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7111 		    final_word = ((GET_MODE_SIZE (inner_mode)
7112 				   - GET_MODE_SIZE (tmode))
7113 				  / UNITS_PER_WORD) - final_word;
7114 
7115 		  final_word *= UNITS_PER_WORD;
7116 		  if (BYTES_BIG_ENDIAN &&
7117 		      GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7118 		    final_word += (GET_MODE_SIZE (inner_mode)
7119 				   - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7120 
7121 		  /* Avoid creating invalid subregs, for example when
7122 		     simplifying (x>>32)&255.  */
7123 		  if (!validate_subreg (tmode, inner_mode, inner, final_word))
7124 		    return NULL_RTX;
7125 
7126 		  new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7127 		}
7128 	      else
7129 		new_rtx = gen_lowpart (tmode, inner);
7130 	    }
7131 	  else
7132 	    new_rtx = inner;
7133 	}
7134       else
7135 	new_rtx = force_to_mode (inner, tmode,
7136 			     len >= HOST_BITS_PER_WIDE_INT
7137 			     ? ~(unsigned HOST_WIDE_INT) 0
7138 			     : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
7139 			     0);
7140 
7141       /* If this extraction is going into the destination of a SET,
7142 	 make a STRICT_LOW_PART unless we made a MEM.  */
7143 
7144       if (in_dest)
7145 	return (MEM_P (new_rtx) ? new_rtx
7146 		: (GET_CODE (new_rtx) != SUBREG
7147 		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
7148 		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7149 
7150       if (mode == tmode)
7151 	return new_rtx;
7152 
7153       if (CONST_SCALAR_INT_P (new_rtx))
7154 	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7155 					 mode, new_rtx, tmode);
7156 
7157       /* If we know that no extraneous bits are set, and that the high
7158 	 bit is not set, convert the extraction to the cheaper of
7159 	 sign and zero extension, that are equivalent in these cases.  */
7160       if (flag_expensive_optimizations
7161 	  && (HWI_COMPUTABLE_MODE_P (tmode)
7162 	      && ((nonzero_bits (new_rtx, tmode)
7163 		   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7164 		  == 0)))
7165 	{
7166 	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7167 	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7168 
7169 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7170 	     backends.  */
7171 	  if (set_src_cost (temp, optimize_this_for_speed_p)
7172 	      <= set_src_cost (temp1, optimize_this_for_speed_p))
7173 	    return temp;
7174 	  return temp1;
7175 	}
7176 
7177       /* Otherwise, sign- or zero-extend unless we already are in the
7178 	 proper mode.  */
7179 
7180       return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7181 			     mode, new_rtx));
7182     }
7183 
7184   /* Unless this is a COMPARE or we have a funny memory reference,
7185      don't do anything with zero-extending field extracts starting at
7186      the low-order bit since they are simple AND operations.  */
7187   if (pos_rtx == 0 && pos == 0 && ! in_dest
7188       && ! in_compare && unsignedp)
7189     return 0;
7190 
7191   /* If INNER is a MEM, reject this if we would be spanning bytes or
7192      if the position is not a constant and the length is not 1.  In all
7193      other cases, we would only be going outside our object in cases when
7194      an original shift would have been undefined.  */
7195   if (MEM_P (inner)
7196       && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7197 	  || (pos_rtx != 0 && len != 1)))
7198     return 0;
7199 
7200   enum extraction_pattern pattern = (in_dest ? EP_insv
7201 				     : unsignedp ? EP_extzv : EP_extv);
7202 
7203   /* If INNER is not from memory, we want it to have the mode of a register
7204      extraction pattern's structure operand, or word_mode if there is no
7205      such pattern.  The same applies to extraction_mode and pos_mode
7206      and their respective operands.
7207 
7208      For memory, assume that the desired extraction_mode and pos_mode
7209      are the same as for a register operation, since at present we don't
7210      have named patterns for aligned memory structures.  */
7211   struct extraction_insn insn;
7212   if (get_best_reg_extraction_insn (&insn, pattern,
7213 				    GET_MODE_BITSIZE (inner_mode), mode))
7214     {
7215       wanted_inner_reg_mode = insn.struct_mode;
7216       pos_mode = insn.pos_mode;
7217       extraction_mode = insn.field_mode;
7218     }
7219 
7220   /* Never narrow an object, since that might not be safe.  */
7221 
7222   if (mode != VOIDmode
7223       && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7224     extraction_mode = mode;
7225 
7226   if (!MEM_P (inner))
7227     wanted_inner_mode = wanted_inner_reg_mode;
7228   else
7229     {
7230       /* Be careful not to go beyond the extracted object and maintain the
7231 	 natural alignment of the memory.  */
7232       wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7233       while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7234 	     > GET_MODE_BITSIZE (wanted_inner_mode))
7235 	{
7236 	  wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7237 	  gcc_assert (wanted_inner_mode != VOIDmode);
7238 	}
7239     }
7240 
7241   orig_pos = pos;
7242 
7243   if (BITS_BIG_ENDIAN)
7244     {
7245       /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7246 	 BITS_BIG_ENDIAN style.  If position is constant, compute new
7247 	 position.  Otherwise, build subtraction.
7248 	 Note that POS is relative to the mode of the original argument.
7249 	 If it's a MEM we need to recompute POS relative to that.
7250 	 However, if we're extracting from (or inserting into) a register,
7251 	 we want to recompute POS relative to wanted_inner_mode.  */
7252       int width = (MEM_P (inner)
7253 		   ? GET_MODE_BITSIZE (is_mode)
7254 		   : GET_MODE_BITSIZE (wanted_inner_mode));
7255 
7256       if (pos_rtx == 0)
7257 	pos = width - len - pos;
7258       else
7259 	pos_rtx
7260 	  = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
7261       /* POS may be less than 0 now, but we check for that below.
7262 	 Note that it can only be less than 0 if !MEM_P (inner).  */
7263     }
7264 
7265   /* If INNER has a wider mode, and this is a constant extraction, try to
7266      make it smaller and adjust the byte to point to the byte containing
7267      the value.  */
7268   if (wanted_inner_mode != VOIDmode
7269       && inner_mode != wanted_inner_mode
7270       && ! pos_rtx
7271       && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7272       && MEM_P (inner)
7273       && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7274       && ! MEM_VOLATILE_P (inner))
7275     {
7276       int offset = 0;
7277 
7278       /* The computations below will be correct if the machine is big
7279 	 endian in both bits and bytes or little endian in bits and bytes.
7280 	 If it is mixed, we must adjust.  */
7281 
7282       /* If bytes are big endian and we had a paradoxical SUBREG, we must
7283 	 adjust OFFSET to compensate.  */
7284       if (BYTES_BIG_ENDIAN
7285 	  && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7286 	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7287 
7288       /* We can now move to the desired byte.  */
7289       offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7290 		* GET_MODE_SIZE (wanted_inner_mode);
7291       pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7292 
7293       if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7294 	  && is_mode != wanted_inner_mode)
7295 	offset = (GET_MODE_SIZE (is_mode)
7296 		  - GET_MODE_SIZE (wanted_inner_mode) - offset);
7297 
7298       inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7299     }
7300 
7301   /* If INNER is not memory, get it into the proper mode.  If we are changing
7302      its mode, POS must be a constant and smaller than the size of the new
7303      mode.  */
7304   else if (!MEM_P (inner))
7305     {
7306       /* On the LHS, don't create paradoxical subregs implicitly truncating
7307 	 the register unless TRULY_NOOP_TRUNCATION.  */
7308       if (in_dest
7309 	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7310 					     wanted_inner_mode))
7311 	return NULL_RTX;
7312 
7313       if (GET_MODE (inner) != wanted_inner_mode
7314 	  && (pos_rtx != 0
7315 	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7316 	return NULL_RTX;
7317 
7318       if (orig_pos < 0)
7319 	return NULL_RTX;
7320 
7321       inner = force_to_mode (inner, wanted_inner_mode,
7322 			     pos_rtx
7323 			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7324 			     ? ~(unsigned HOST_WIDE_INT) 0
7325 			     : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7326 				<< orig_pos),
7327 			     0);
7328     }
7329 
7330   /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
7331      have to zero extend.  Otherwise, we can just use a SUBREG.  */
7332   if (pos_rtx != 0
7333       && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7334     {
7335       rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx);
7336 
7337       /* If we know that no extraneous bits are set, and that the high
7338 	 bit is not set, convert extraction to cheaper one - either
7339 	 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7340 	 cases.  */
7341       if (flag_expensive_optimizations
7342 	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7343 	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7344 		   & ~(((unsigned HOST_WIDE_INT)
7345 			GET_MODE_MASK (GET_MODE (pos_rtx)))
7346 		       >> 1))
7347 		  == 0)))
7348 	{
7349 	  rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx);
7350 
7351 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7352 	     backends.  */
7353 	  if (set_src_cost (temp1, optimize_this_for_speed_p)
7354 	      < set_src_cost (temp, optimize_this_for_speed_p))
7355 	    temp = temp1;
7356 	}
7357       pos_rtx = temp;
7358     }
7359 
7360   /* Make POS_RTX unless we already have it and it is correct.  If we don't
7361      have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7362      be a CONST_INT.  */
7363   if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7364     pos_rtx = orig_pos_rtx;
7365 
7366   else if (pos_rtx == 0)
7367     pos_rtx = GEN_INT (pos);
7368 
7369   /* Make the required operation.  See if we can reuse an existing rtx.  */
7370   new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7371 			 extraction_mode, inner, GEN_INT (len), pos_rtx);
7372   if (! in_dest)
7373     new_rtx = gen_lowpart (mode, new_rtx);
7374 
7375   return new_rtx;
7376 }
7377 
7378 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7379    with any other operations in X.  Return X without that shift if so.  */
7380 
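/* For example, with COUNT == 3, (plus (ashift X (const_int 3)) (const_int 8))
   yields (plus X (const_int 1)): the shift is commuted out and the constant
   is scaled down to match.  */
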
7381 static rtx
7382 extract_left_shift (rtx x, int count)
7383 {
7384   enum rtx_code code = GET_CODE (x);
7385   enum machine_mode mode = GET_MODE (x);
7386   rtx tem;
7387 
7388   switch (code)
7389     {
7390     case ASHIFT:
7391       /* This is the shift itself.  If it is wide enough, we will return
7392 	 either the value being shifted if the shift count is equal to
7393 	 COUNT or a shift for the difference.  */
7394       if (CONST_INT_P (XEXP (x, 1))
7395 	  && INTVAL (XEXP (x, 1)) >= count)
7396 	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7397 				     INTVAL (XEXP (x, 1)) - count);
7398       break;
7399 
7400     case NEG:  case NOT:
7401       if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7402 	return simplify_gen_unary (code, mode, tem, mode);
7403 
7404       break;
7405 
7406     case PLUS:  case IOR:  case XOR:  case AND:
7407       /* If we can safely shift this constant and we find the inner shift,
7408 	 make a new operation.  */
7409       if (CONST_INT_P (XEXP (x, 1))
7410 	  && (UINTVAL (XEXP (x, 1))
7411 	      & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7412 	  && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7413 	return simplify_gen_binary (code, mode, tem,
7414 				    GEN_INT (INTVAL (XEXP (x, 1)) >> count));
7415 
7416       break;
7417 
7418     default:
7419       break;
7420     }
7421 
7422   return 0;
7423 }
7424 
7425 /* Look at the expression rooted at X.  Look for expressions
7426    equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7427    Form these expressions.
7428 
7429    Return the new rtx, usually just X.
7430 
7431    Also, for machines like the VAX that don't have logical shift insns,
7432    try to convert logical to arithmetic shift operations in cases where
7433    they are equivalent.  This undoes the canonicalizations to logical
7434    shifts done elsewhere.
7435 
7436    We try, as much as possible, to re-use rtl expressions to save memory.
7437 
7438    IN_CODE says what kind of expression we are processing.  Normally, it is
7439    SET.  In a memory address (inside a MEM, PLUS or MINUS, the latter two
7440    being kludges), it is MEM.  When processing the arguments of a comparison
7441    or a COMPARE against zero, it is COMPARE.  */
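/* For example, (and (lshiftrt X (const_int 3)) (const_int 15)) is
   typically rewritten as (zero_extract X (const_int 4) (const_int 3)),
   an unsigned four-bit field starting at bit 3.  */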
7442 
7443 rtx
7444 make_compound_operation (rtx x, enum rtx_code in_code)
7445 {
7446   enum rtx_code code = GET_CODE (x);
7447   enum machine_mode mode = GET_MODE (x);
7448   int mode_width = GET_MODE_PRECISION (mode);
7449   rtx rhs, lhs;
7450   enum rtx_code next_code;
7451   int i, j;
7452   rtx new_rtx = 0;
7453   rtx tem;
7454   const char *fmt;
7455 
7456   /* Select the code to be used in recursive calls.  Once we are inside an
7457      address, we stay there.  If we have a comparison, set to COMPARE,
7458      but once inside, go back to our default of SET.  */
7459 
7460   next_code = (code == MEM ? MEM
7461 	       : ((code == PLUS || code == MINUS)
7462 		  && SCALAR_INT_MODE_P (mode)) ? MEM
7463 	       : ((code == COMPARE || COMPARISON_P (x))
7464 		  && XEXP (x, 1) == const0_rtx) ? COMPARE
7465 	       : in_code == COMPARE ? SET : in_code);
7466 
7467   /* Process depending on the code of this operation.  If NEW_RTX is set
7468      nonzero, it will be returned.  */
7469 
7470   switch (code)
7471     {
7472     case ASHIFT:
7473       /* Convert shifts by constants into multiplications if inside
7474 	 an address.  */
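      /* E.g. (ashift R (const_int 2)) appearing inside an address is
	 rewritten as (mult R (const_int 4)), the canonical form for
	 scaled index addressing.  */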
7475       if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7476 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7477 	  && INTVAL (XEXP (x, 1)) >= 0
7478 	  && SCALAR_INT_MODE_P (mode))
7479 	{
7480 	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7481 	  HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7482 
7483 	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7484 	  if (GET_CODE (new_rtx) == NEG)
7485 	    {
7486 	      new_rtx = XEXP (new_rtx, 0);
7487 	      multval = -multval;
7488 	    }
7489 	  multval = trunc_int_for_mode (multval, mode);
7490 	  new_rtx = gen_rtx_MULT (mode, new_rtx, GEN_INT (multval));
7491 	}
7492       break;
7493 
7494     case PLUS:
7495       lhs = XEXP (x, 0);
7496       rhs = XEXP (x, 1);
7497       lhs = make_compound_operation (lhs, next_code);
7498       rhs = make_compound_operation (rhs, next_code);
7499       if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7500 	  && SCALAR_INT_MODE_P (mode))
7501 	{
7502 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7503 				     XEXP (lhs, 1));
7504 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7505 	}
7506       else if (GET_CODE (lhs) == MULT
7507 	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7508 	{
7509 	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7510 				     simplify_gen_unary (NEG, mode,
7511 							 XEXP (lhs, 1),
7512 							 mode));
7513 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7514 	}
7515       else
7516 	{
7517 	  SUBST (XEXP (x, 0), lhs);
7518 	  SUBST (XEXP (x, 1), rhs);
7519 	  goto maybe_swap;
7520 	}
7521       x = gen_lowpart (mode, new_rtx);
7522       goto maybe_swap;
7523 
7524     case MINUS:
7525       lhs = XEXP (x, 0);
7526       rhs = XEXP (x, 1);
7527       lhs = make_compound_operation (lhs, next_code);
7528       rhs = make_compound_operation (rhs, next_code);
7529       if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7530 	  && SCALAR_INT_MODE_P (mode))
7531 	{
7532 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7533 				     XEXP (rhs, 1));
7534 	  new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7535 	}
7536       else if (GET_CODE (rhs) == MULT
7537 	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7538 	{
7539 	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7540 				     simplify_gen_unary (NEG, mode,
7541 							 XEXP (rhs, 1),
7542 							 mode));
7543 	  new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7544 	}
7545       else
7546 	{
7547 	  SUBST (XEXP (x, 0), lhs);
7548 	  SUBST (XEXP (x, 1), rhs);
7549 	  return x;
7550 	}
7551       return gen_lowpart (mode, new_rtx);
7552 
7553     case AND:
7554       /* If the second operand is not a constant, we can't do anything
7555 	 with it.  */
7556       if (!CONST_INT_P (XEXP (x, 1)))
7557 	break;
7558 
7559       /* If the constant is a power of two minus one and the first operand
7560 	 is a logical right shift, make an extraction.  */
7561       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7562 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7563 	{
7564 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7565 	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7566 				 0, in_code == COMPARE);
7567 	}
7568 
7569       /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
7570       else if (GET_CODE (XEXP (x, 0)) == SUBREG
7571 	       && subreg_lowpart_p (XEXP (x, 0))
7572 	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7573 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7574 	{
7575 	  new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7576 					 next_code);
7577 	  new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7578 				 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7579 				 0, in_code == COMPARE);
7580 	}
7581       /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
7582       else if ((GET_CODE (XEXP (x, 0)) == XOR
7583 		|| GET_CODE (XEXP (x, 0)) == IOR)
7584 	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7585 	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7586 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7587 	{
7588 	  /* Apply the distributive law, and then try to make extractions.  */
7589 	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7590 				gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7591 					     XEXP (x, 1)),
7592 				gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7593 					     XEXP (x, 1)));
7594 	  new_rtx = make_compound_operation (new_rtx, in_code);
7595 	}
7596 
7597       /* If we have (and (rotate X C) M) and C is at least as large as the
7598 	 number of bits in M, this is an extraction.  */
7599 
7600       else if (GET_CODE (XEXP (x, 0)) == ROTATE
7601 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7602 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7603 	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7604 	{
7605 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7606 	  new_rtx = make_extraction (mode, new_rtx,
7607 				 (GET_MODE_PRECISION (mode)
7608 				  - INTVAL (XEXP (XEXP (x, 0), 1))),
7609 				 NULL_RTX, i, 1, 0, in_code == COMPARE);
7610 	}
7611 
7612       /* On machines without logical shifts, if the operand of the AND is
7613 	 a logical shift and our mask turns off all the propagated sign
7614 	 bits, we can replace the logical shift with an arithmetic shift.  */
7615       else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7616 	       && !have_insn_for (LSHIFTRT, mode)
7617 	       && have_insn_for (ASHIFTRT, mode)
7618 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7619 	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7620 	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7621 	       && mode_width <= HOST_BITS_PER_WIDE_INT)
7622 	{
7623 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7624 
7625 	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7626 	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7627 	    SUBST (XEXP (x, 0),
7628 		   gen_rtx_ASHIFTRT (mode,
7629 				     make_compound_operation
7630 				     (XEXP (XEXP (x, 0), 0), next_code),
7631 				     XEXP (XEXP (x, 0), 1)));
7632 	}
7633 
7634       /* If the constant is one less than a power of two, this might be
7635 	 representable by an extraction even if no shift is present.
7636 	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7637 	 we are in a COMPARE.  */
7638       else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7639 	new_rtx = make_extraction (mode,
7640 			       make_compound_operation (XEXP (x, 0),
7641 							next_code),
7642 			       0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7643 
7644       /* If we are in a comparison and this is an AND with a power of two,
7645 	 convert this into the appropriate bit extract.  */
7646       else if (in_code == COMPARE
7647 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7648 	new_rtx = make_extraction (mode,
7649 			       make_compound_operation (XEXP (x, 0),
7650 							next_code),
7651 			       i, NULL_RTX, 1, 1, 0, 1);
7652 
7653       break;
7654 
7655     case LSHIFTRT:
7656       /* If the sign bit is known to be zero, replace this with an
7657 	 arithmetic shift.  */
7658       if (have_insn_for (ASHIFTRT, mode)
7659 	  && ! have_insn_for (LSHIFTRT, mode)
7660 	  && mode_width <= HOST_BITS_PER_WIDE_INT
7661 	  && (nonzero_bits (XEXP (x, 0), mode)
	      & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1))) == 0)
7662 	{
7663 	  new_rtx = gen_rtx_ASHIFTRT (mode,
7664 				  make_compound_operation (XEXP (x, 0),
7665 							   next_code),
7666 				  XEXP (x, 1));
7667 	  break;
7668 	}
7669 
7670       /* ... fall through ...  */
7671 
7672     case ASHIFTRT:
7673       lhs = XEXP (x, 0);
7674       rhs = XEXP (x, 1);
7675 
7676       /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7677 	 this is a SIGN_EXTRACT.  */
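      /* For example, in SImode, (ashiftrt (ashift X (const_int 24))
	 (const_int 24)) picks out the low byte of X with sign extension,
	 so it is rewritten as an 8-bit signed extraction at bit 0.  */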
7678       if (CONST_INT_P (rhs)
7679 	  && GET_CODE (lhs) == ASHIFT
7680 	  && CONST_INT_P (XEXP (lhs, 1))
7681 	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7682 	  && INTVAL (XEXP (lhs, 1)) >= 0
7683 	  && INTVAL (rhs) < mode_width)
7684 	{
7685 	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7686 	  new_rtx = make_extraction (mode, new_rtx,
7687 				 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7688 				 NULL_RTX, mode_width - INTVAL (rhs),
7689 				 code == LSHIFTRT, 0, in_code == COMPARE);
7690 	  break;
7691 	}
7692 
7693       /* See if we have operations between an ASHIFTRT and an ASHIFT.
7694 	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
7695 	 also do this for some cases of SIGN_EXTRACT, but it doesn't
7696 	 seem worth the effort; the case checked for occurs on Alpha.  */
7697 
7698       if (!OBJECT_P (lhs)
7699 	  && ! (GET_CODE (lhs) == SUBREG
7700 		&& (OBJECT_P (SUBREG_REG (lhs))))
7701 	  && CONST_INT_P (rhs)
7702 	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
7703 	  && INTVAL (rhs) < mode_width
7704 	  && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
7705 	new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
7706 			       0, NULL_RTX, mode_width - INTVAL (rhs),
7707 			       code == LSHIFTRT, 0, in_code == COMPARE);
7708 
7709       break;
7710 
7711     case SUBREG:
7712       /* Call ourselves recursively on the inner expression.  If we are
7713 	 narrowing the object and it has a different RTL code from
7714 	 the one it originally had, do this SUBREG as a force_to_mode.  */
7715       {
7716 	rtx inner = SUBREG_REG (x), simplified;
7717 	enum rtx_code subreg_code = in_code;
7718 
7719 	/* If in_code is COMPARE, it isn't always safe to pass it through
7720 	   to the recursive make_compound_operation call.  */
7721 	if (subreg_code == COMPARE
7722 	    && (!subreg_lowpart_p (x)
7723 		|| GET_CODE (inner) == SUBREG
7724 		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
7725 		   is (const_int 0), rather than
7726 		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).  */
7727 		|| (GET_CODE (inner) == AND
7728 		    && CONST_INT_P (XEXP (inner, 1))
7729 		    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7730 		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
7731 		       >= GET_MODE_BITSIZE (mode))))
7732 	  subreg_code = SET;
7733 
7734 	tem = make_compound_operation (inner, subreg_code);
7735 
7736 	simplified
7737 	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
7738 	if (simplified)
7739 	  tem = simplified;
7740 
7741 	if (GET_CODE (tem) != GET_CODE (inner)
7742 	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
7743 	    && subreg_lowpart_p (x))
7744 	  {
7745 	    rtx newer
7746 	      = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
7747 
7748 	    /* If we have something other than a SUBREG, we might have
7749 	       done an expansion, so rerun ourselves.  */
7750 	    if (GET_CODE (newer) != SUBREG)
7751 	      newer = make_compound_operation (newer, in_code);
7752 
7753 	    /* force_to_mode can expand compounds.  If it just re-expanded the
7754 	       compound, use gen_lowpart to convert to the desired mode.  */
7755 	    if (rtx_equal_p (newer, x)
7756 		/* Likewise if it re-expanded the compound only partially.
7757 		   This happens for SUBREG of ZERO_EXTRACT if they extract
7758 		   the same number of bits.  */
7759 		|| (GET_CODE (newer) == SUBREG
7760 		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
7761 			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
7762 		    && GET_CODE (inner) == AND
7763 		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
7764 	      return gen_lowpart (GET_MODE (x), tem);
7765 
7766 	    return newer;
7767 	  }
7768 
7769 	if (simplified)
7770 	  return tem;
7771       }
7772       break;
7773 
7774     default:
7775       break;
7776     }
7777 
7778   if (new_rtx)
7779     {
7780       x = gen_lowpart (mode, new_rtx);
7781       code = GET_CODE (x);
7782     }
7783 
7784   /* Now recursively process each operand of this operation.  We need to
7785      handle ZERO_EXTEND specially so that we don't lose track of the
7786      inner mode.  */
7787   if (GET_CODE (x) == ZERO_EXTEND)
7788     {
7789       new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7790       tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
7791 					    new_rtx, GET_MODE (XEXP (x, 0)));
7792       if (tem)
7793 	return tem;
7794       SUBST (XEXP (x, 0), new_rtx);
7795       return x;
7796     }
7797 
7798   fmt = GET_RTX_FORMAT (code);
7799   for (i = 0; i < GET_RTX_LENGTH (code); i++)
7800     if (fmt[i] == 'e')
7801       {
7802 	new_rtx = make_compound_operation (XEXP (x, i), next_code);
7803 	SUBST (XEXP (x, i), new_rtx);
7804       }
7805     else if (fmt[i] == 'E')
7806       for (j = 0; j < XVECLEN (x, i); j++)
7807 	{
7808 	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
7809 	  SUBST (XVECEXP (x, i, j), new_rtx);
7810 	}
7811 
7812  maybe_swap:
7813   /* If this is a commutative operation, the changes to the operands
7814      may have made it noncanonical.  */
7815   if (COMMUTATIVE_ARITH_P (x)
7816       && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
7817     {
7818       tem = XEXP (x, 0);
7819       SUBST (XEXP (x, 0), XEXP (x, 1));
7820       SUBST (XEXP (x, 1), tem);
7821     }
7822 
7823   return x;
7824 }
7825 
7826 /* Given M, see if it is a value that would select a field of bits
7827    within an item, but not the entire word.  Return -1 if not.
7828    Otherwise, return the starting position of the field, where 0 is the
7829    low-order bit.
7830 
7831    *PLEN is set to the length of the field.  */
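/* For example, M == 0x78 (binary 1111000) selects a four-bit field
   starting at bit 3, so we would return 3 and set *PLEN to 4.  */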
7832 
7833 static int
7834 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
7835 {
7836   /* Get the bit number of the first 1 bit from the right, -1 if none.  */
7837   int pos = m ? ctz_hwi (m) : -1;
7838   int len = 0;
7839 
7840   if (pos >= 0)
7841     /* Now shift off the low-order zero bits and see if we have a
7842        power of two minus 1.  */
7843     len = exact_log2 ((m >> pos) + 1);
7844 
7845   if (len <= 0)
7846     pos = -1;
7847 
7848   *plen = len;
7849   return pos;
7850 }
7851 
7852 /* If X refers to a register that equals REG in value, replace these
7853    references with REG.  */
7854 static rtx
7855 canon_reg_for_combine (rtx x, rtx reg)
7856 {
7857   rtx op0, op1, op2;
7858   const char *fmt;
7859   int i;
7860   bool copied;
7861 
7862   enum rtx_code code = GET_CODE (x);
7863   switch (GET_RTX_CLASS (code))
7864     {
7865     case RTX_UNARY:
7866       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7867       if (op0 != XEXP (x, 0))
7868 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
7869 				   GET_MODE (reg));
7870       break;
7871 
7872     case RTX_BIN_ARITH:
7873     case RTX_COMM_ARITH:
7874       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7875       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7876       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7877 	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
7878       break;
7879 
7880     case RTX_COMPARE:
7881     case RTX_COMM_COMPARE:
7882       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7883       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7884       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
7885 	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
7886 					GET_MODE (op0), op0, op1);
7887       break;
7888 
7889     case RTX_TERNARY:
7890     case RTX_BITFIELD_OPS:
7891       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
7892       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
7893       op2 = canon_reg_for_combine (XEXP (x, 2), reg);
7894       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
7895 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
7896 				     GET_MODE (op0), op0, op1, op2);
      break;
7897 
7898     case RTX_OBJ:
7899       if (REG_P (x))
7900 	{
7901 	  if (rtx_equal_p (get_last_value (reg), x)
7902 	      || rtx_equal_p (reg, get_last_value (x)))
7903 	    return reg;
7904 	  else
7905 	    break;
7906 	}
7907 
7908       /* fall through */
7909 
7910     default:
7911       fmt = GET_RTX_FORMAT (code);
7912       copied = false;
7913       for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7914 	if (fmt[i] == 'e')
7915 	  {
7916 	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
7917 	    if (op != XEXP (x, i))
7918 	      {
7919 		if (!copied)
7920 		  {
7921 		    copied = true;
7922 		    x = copy_rtx (x);
7923 		  }
7924 		XEXP (x, i) = op;
7925 	      }
7926 	  }
7927 	else if (fmt[i] == 'E')
7928 	  {
7929 	    int j;
7930 	    for (j = 0; j < XVECLEN (x, i); j++)
7931 	      {
7932 		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
7933 		if (op != XVECEXP (x, i, j))
7934 		  {
7935 		    if (!copied)
7936 		      {
7937 			copied = true;
7938 			x = copy_rtx (x);
7939 		      }
7940 		    XVECEXP (x, i, j) = op;
7941 		  }
7942 	      }
7943 	  }
7944 
7945       break;
7946     }
7947 
7948   return x;
7949 }
7950 
7951 /* Return X converted to MODE.  If the value is already truncated to
7952    MODE we can just return a subreg even though in the general case we
7953    would need an explicit truncation.  */
7954 
7955 static rtx
7956 gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
7957 {
7958   if (!CONST_INT_P (x)
7959       && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
7960       && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
7961       && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
7962     {
7963       /* Bit-cast X into an integer mode.  */
7964       if (!SCALAR_INT_MODE_P (GET_MODE (x)))
7965 	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
7966       x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
7967 			      x, GET_MODE (x));
7968     }
7969 
7970   return gen_lowpart (mode, x);
7971 }
7972 
7973 /* See if X can be simplified knowing that we will only refer to it in
7974    MODE and will only refer to those bits that are nonzero in MASK.
7975    If other bits are being computed or if masking operations are done
7976    that select a superset of the bits in MASK, they can sometimes be
7977    ignored.
7978 
7979    Return a possibly simplified expression, but always convert X to
7980    MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.
7981 
7982    If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
7983    are all off in X.  This is used when X will be complemented, by either
7984    NOT, NEG, or XOR.  */
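/* For example, with MASK == 0x0f, (and Y (const_int 0xff)) simplifies
   to just Y: only the low four bits of the result will be used, and
   those are unaffected by the AND.  */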
7985 
7986 static rtx
7987 force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
7988 	       int just_select)
7989 {
7990   enum rtx_code code = GET_CODE (x);
7991   int next_select = just_select || code == XOR || code == NOT || code == NEG;
7992   enum machine_mode op_mode;
7993   unsigned HOST_WIDE_INT fuller_mask, nonzero;
7994   rtx op0, op1, temp;
7995 
7996   /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
7997      code below will do the wrong thing since the mode of such an
7998      expression is VOIDmode.
7999 
8000      Also do nothing if X is a CLOBBER; this can happen if X was
8001      the return value from a call to gen_lowpart.  */
8002   if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8003     return x;
8004 
8005   /* We want to perform the operation in its present mode unless we know
8006      that the operation is valid in MODE, in which case we do the operation
8007      in MODE.  */
8008   op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8009 	      && have_insn_for (code, mode))
8010 	     ? mode : GET_MODE (x));
8011 
8012   /* It is not valid to do a right-shift in a narrower mode
8013      than the one it came in with.  */
8014   if ((code == LSHIFTRT || code == ASHIFTRT)
8015       && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
8016     op_mode = GET_MODE (x);
8017 
8018   /* Truncate MASK to fit OP_MODE.  */
8019   if (op_mode)
8020     mask &= GET_MODE_MASK (op_mode);
8021 
8022   /* When we have an arithmetic operation, or a shift whose count we
8023      do not know, we need to assume that all bits up to the highest-order
8024      bit in MASK will be needed.  This is how we form such a mask.  */
8025   if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
8026     fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
8027   else
8028     fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
8029 		   - 1);
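
  /* E.g. MASK == 0x14 gives FULLER_MASK == 0x1f: a carry rippling up
     from any of bits 0-3 can change bit 2 or bit 4, which MASK
     selects.  */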
8030 
8031   /* Determine what bits of X are guaranteed to be (non)zero.  */
8032   nonzero = nonzero_bits (x, mode);
8033 
8034   /* If none of the bits in X are needed, return a zero.  */
8035   if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8036     x = const0_rtx;
8037 
8038   /* If X is a CONST_INT, return a new one.  Do this here since the
8039      test below will fail.  */
8040   if (CONST_INT_P (x))
8041     {
8042       if (SCALAR_INT_MODE_P (mode))
8043 	return gen_int_mode (INTVAL (x) & mask, mode);
8044       else
8045 	{
8046 	  x = GEN_INT (INTVAL (x) & mask);
8047 	  return gen_lowpart_common (mode, x);
8048 	}
8049     }
8050 
8051   /* If X is narrower than MODE and we want all the bits in X's mode, just
8052      get X in the proper mode.  */
8053   if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8054       && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8055     return gen_lowpart (mode, x);
8056 
8057   /* We can ignore the effect of a SUBREG if it narrows the mode or
8058      if the constant masks to zero all the bits the mode doesn't have.  */
8059   if (GET_CODE (x) == SUBREG
8060       && subreg_lowpart_p (x)
8061       && ((GET_MODE_SIZE (GET_MODE (x))
8062 	   < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8063 	  || (0 == (mask
8064 		    & GET_MODE_MASK (GET_MODE (x))
8065 		    & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8066     return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8067 
8068   /* The arithmetic simplifications here only work for scalar integer modes.  */
8069   if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8070     return gen_lowpart_or_truncate (mode, x);
8071 
8072   switch (code)
8073     {
8074     case CLOBBER:
8075       /* If X is a (clobber (const_int)), return it since we know we are
8076 	 generating something that won't match.  */
8077       return x;
8078 
8079     case SIGN_EXTEND:
8080     case ZERO_EXTEND:
8081     case ZERO_EXTRACT:
8082     case SIGN_EXTRACT:
8083       x = expand_compound_operation (x);
8084       if (GET_CODE (x) != code)
8085 	return force_to_mode (x, mode, mask, next_select);
8086       break;
8087 
8088     case TRUNCATE:
8089       /* Similarly for a truncate.  */
8090       return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8091 
8092     case AND:
8093       /* If this is an AND with a constant, convert it into an AND
8094 	 whose constant is the AND of that constant with MASK.  If it
8095 	 remains an AND of MASK, delete it since it is redundant.  */
8096 
8097       if (CONST_INT_P (XEXP (x, 1)))
8098 	{
8099 	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8100 				      mask & INTVAL (XEXP (x, 1)));
8101 
8102 	  /* If X is still an AND, see if it is an AND with a mask that
8103 	     is just some low-order bits.  If so, and it is MASK, we don't
8104 	     need it.  */
8105 
8106 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8107 	      && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8108 		  == mask))
8109 	    x = XEXP (x, 0);
8110 
8111 	  /* If it remains an AND, try making another AND with the bits
8112 	     in the mode mask that aren't in MASK turned on.  If the
8113 	     constant in the AND is wide enough, this might make a
8114 	     cheaper constant.  */
8115 
8116 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8117 	      && GET_MODE_MASK (GET_MODE (x)) != mask
8118 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8119 	    {
8120 	      unsigned HOST_WIDE_INT cval
8121 		= UINTVAL (XEXP (x, 1))
8122 		  | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8123 	      int width = GET_MODE_PRECISION (GET_MODE (x));
8124 	      rtx y;
8125 
8126 	      /* If the mode of X is narrower than HOST_WIDE_INT and CVAL is a
8127 		 negative number, sign extend it.  */
8128 	      if (width > 0 && width < HOST_BITS_PER_WIDE_INT
8129 		  && (cval & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8130 		cval |= (unsigned HOST_WIDE_INT) -1 << width;
8131 
8132 	      y = simplify_gen_binary (AND, GET_MODE (x),
8133 				       XEXP (x, 0), GEN_INT (cval));
8134 	      if (set_src_cost (y, optimize_this_for_speed_p)
8135 	          < set_src_cost (x, optimize_this_for_speed_p))
8136 		x = y;
8137 	    }
8138 
8139 	  break;
8140 	}
8141 
8142       goto binop;
8143 
8144     case PLUS:
8145       /* In (and (plus FOO C1) M), if M is a mask that just turns off
8146 	 low-order bits (as in an alignment operation) and FOO is already
8147 	 aligned to that boundary, mask C1 to that boundary as well.
8148 	 This may eliminate that PLUS and, later, the AND.  */
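      /* For example, with M == ~7, if the low three bits of FOO are
	 known to be zero, (plus FOO (const_int 9)) can be treated as
	 (plus FOO (const_int 8)); the dropped low bit cannot carry
	 into the bits that M selects.  */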
8149 
8150       {
8151 	unsigned int width = GET_MODE_PRECISION (mode);
8152 	unsigned HOST_WIDE_INT smask = mask;
8153 
8154 	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8155 	   number, sign extend it.  */
8156 
8157 	if (width < HOST_BITS_PER_WIDE_INT
8158 	    && (smask & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
8159 	  smask |= (unsigned HOST_WIDE_INT) (-1) << width;
8160 
8161 	if (CONST_INT_P (XEXP (x, 1))
8162 	    && exact_log2 (- smask) >= 0
8163 	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8164 	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8165 	  return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8166 					       (INTVAL (XEXP (x, 1)) & smask)),
8167 				mode, smask, next_select);
8168       }
8169 
8170       /* ... fall through ...  */
8171 
8172     case MULT:
8173       /* For PLUS, MINUS and MULT, we need any bits less significant than the
8174 	 most significant bit in MASK since carries from those bits will
8175 	 affect the bits we are interested in.  */
8176       mask = fuller_mask;
8177       goto binop;
8178 
8179     case MINUS:
8180       /* If X is (minus C Y) where the least significant set bit of C is
8181 	 larger than any bit in the mask, then we may replace it with (neg Y).  */
8182       if (CONST_INT_P (XEXP (x, 0))
8183 	  && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0))
8184 					& -INTVAL (XEXP (x, 0))))
8185 	      > mask))
8186 	{
8187 	  x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8188 				  GET_MODE (x));
8189 	  return force_to_mode (x, mode, mask, next_select);
8190 	}
8191 
8192       /* Similarly, if C contains every bit in the fuller_mask, then we may
8193 	 replace with (not Y).  */
8194       if (CONST_INT_P (XEXP (x, 0))
8195 	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8196 	{
8197 	  x = simplify_gen_unary (NOT, GET_MODE (x),
8198 				  XEXP (x, 1), GET_MODE (x));
8199 	  return force_to_mode (x, mode, mask, next_select);
8200 	}
8201 
8202       mask = fuller_mask;
8203       goto binop;
8204 
8205     case IOR:
8206     case XOR:
8207       /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8208 	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8209 	 operation which may be a bitfield extraction.  Ensure that the
8210 	 constant we form is not wider than the mode of X.  */
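      /* E.g. (ior (lshiftrt FOO (const_int 4)) (const_int 3)) becomes
	 (lshiftrt (ior FOO (const_int 48)) (const_int 4)), pushing the
	 constant inside the shift.  */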
8211 
8212       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8213 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8214 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8215 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8216 	  && CONST_INT_P (XEXP (x, 1))
8217 	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
8218 	       + floor_log2 (INTVAL (XEXP (x, 1))))
8219 	      < GET_MODE_PRECISION (GET_MODE (x)))
8220 	  && (UINTVAL (XEXP (x, 1))
8221 	      & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8222 	{
8223 	  temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
8224 			  << INTVAL (XEXP (XEXP (x, 0), 1)));
8225 	  temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8226 				      XEXP (XEXP (x, 0), 0), temp);
8227 	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8228 				   XEXP (XEXP (x, 0), 1));
8229 	  return force_to_mode (x, mode, mask, next_select);
8230 	}
8231 
8232     binop:
8233       /* For most binary operations, just propagate into the operation and
8234 	 change the mode if we have an operation of that mode.  */
8235 
8236       op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8237       op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8238 
8239       /* If we ended up truncating both operands, truncate the result of the
8240 	 operation instead.  */
8241       if (GET_CODE (op0) == TRUNCATE
8242 	  && GET_CODE (op1) == TRUNCATE)
8243 	{
8244 	  op0 = XEXP (op0, 0);
8245 	  op1 = XEXP (op1, 0);
8246 	}
8247 
8248       op0 = gen_lowpart_or_truncate (op_mode, op0);
8249       op1 = gen_lowpart_or_truncate (op_mode, op1);
8250 
8251       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8252 	x = simplify_gen_binary (code, op_mode, op0, op1);
8253       break;
8254 
8255     case ASHIFT:
8256       /* For left shifts, do the same, but just for the first operand.
8257 	 However, we cannot do anything with shifts where we cannot
8258 	 guarantee that the counts are smaller than the size of the mode
8259 	 because such a count will have a different meaning in a
8260 	 wider mode.  */
8261 
8262       if (! (CONST_INT_P (XEXP (x, 1))
8263 	     && INTVAL (XEXP (x, 1)) >= 0
8264 	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8265 	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8266 		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8267 		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8268 	break;
8269 
8270       /* If the shift count is a constant and we can do arithmetic in
8271 	 the mode of the shift, refine which bits we need.  Otherwise, use the
8272 	 conservative form of the mask.  */
8273       if (CONST_INT_P (XEXP (x, 1))
8274 	  && INTVAL (XEXP (x, 1)) >= 0
8275 	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8276 	  && HWI_COMPUTABLE_MODE_P (op_mode))
8277 	mask >>= INTVAL (XEXP (x, 1));
8278       else
8279 	mask = fuller_mask;
8280 
8281       op0 = gen_lowpart_or_truncate (op_mode,
8282 				     force_to_mode (XEXP (x, 0), op_mode,
8283 						    mask, next_select));
8284 
8285       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8286 	x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8287       break;
8288 
8289     case LSHIFTRT:
8290       /* Here we can only do something if the shift count is a constant
8291 	 that is valid for the host, and we can do arithmetic in
8292 	 OP_MODE.  */
8293 
8294       if (CONST_INT_P (XEXP (x, 1))
8295 	  && INTVAL (XEXP (x, 1)) >= 0
8296 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8297 	  && HWI_COMPUTABLE_MODE_P (op_mode))
8298 	{
8299 	  rtx inner = XEXP (x, 0);
8300 	  unsigned HOST_WIDE_INT inner_mask;
8301 
8302 	  /* Select the mask of the bits we need for the shift operand.  */
8303 	  inner_mask = mask << INTVAL (XEXP (x, 1));
8304 
8305 	  /* We can only change the mode of the shift if we can do arithmetic
8306 	     in the mode of the shift and INNER_MASK is no wider than the
8307 	     width of X's mode.  */
8308 	  if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8309 	    op_mode = GET_MODE (x);
8310 
8311 	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8312 
8313 	  if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8314 	    x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8315 	}
8316 
8317       /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8318 	 shift and AND produces only copies of the sign bit (C2 is one less
8319 	 than a power of two), we can do this with just a shift.  */
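      /* E.g. in SImode, if FOO is a sign-extended byte (at least 25
	 sign-bit copies), (and (lshiftrt FOO (const_int 28)) (const_int 1))
	 reduces to (lshiftrt FOO (const_int 31)): every bit the shift
	 leaves is a copy of the sign bit, so a bigger shift alone
	 isolates it.  */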
8320 
8321       if (GET_CODE (x) == LSHIFTRT
8322 	  && CONST_INT_P (XEXP (x, 1))
8323 	  /* The shift puts one of the sign bit copies in the least significant
8324 	     bit.  */
8325 	  && ((INTVAL (XEXP (x, 1))
8326 	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8327 	      >= GET_MODE_PRECISION (GET_MODE (x)))
8328 	  && exact_log2 (mask + 1) >= 0
8329 	  /* Number of bits left after the shift must be more than the mask
8330 	     needs.  */
8331 	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8332 	      <= GET_MODE_PRECISION (GET_MODE (x)))
8333 	  /* Must be more sign bit copies than the mask needs.  */
8334 	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8335 	      >= exact_log2 (mask + 1)))
8336 	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8337 				 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8338 					  - exact_log2 (mask + 1)));
8339 
8340       goto shiftrt;
8341 
8342     case ASHIFTRT:
8343       /* If we are just looking for the sign bit, we don't need this shift at
8344 	 all, even if it has a variable count.  */
8345       if (val_signbit_p (GET_MODE (x), mask))
8346 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8347 
8348       /* If this is a shift by a constant, get a mask that contains those bits
8349 	 that are not copies of the sign bit.  We then have two cases:  If
8350 	 MASK only includes those bits, this can be a logical shift, which may
8351 	 allow simplifications.  If MASK is a single-bit field not within
8352 	 those bits, we are requesting a copy of the sign bit and hence can
8353 	 shift the sign bit to the appropriate location.  */
8354 
8355       if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8356 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8357 	{
8358 	  int i;
8359 
8360 	  /* If the considered data is wider than HOST_WIDE_INT, we can't
8361 	     represent a mask for all its bits in a single scalar.
8362 	     But we only care about the lower bits, so calculate these.  */
8363 
8364 	  if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8365 	    {
8366 	      nonzero = ~(unsigned HOST_WIDE_INT) 0;
8367 
8368 	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8369 		 is the number of bits a full-width mask would have set.
8370 		 We need only shift if these are fewer than nonzero can
8371 		 hold.  If not, we must keep all bits set in nonzero.  */
8372 
8373 	      if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8374 		  < HOST_BITS_PER_WIDE_INT)
8375 		nonzero >>= INTVAL (XEXP (x, 1))
8376 			    + HOST_BITS_PER_WIDE_INT
8377 			    - GET_MODE_PRECISION (GET_MODE (x)) ;
8378 	    }
8379 	  else
8380 	    {
8381 	      nonzero = GET_MODE_MASK (GET_MODE (x));
8382 	      nonzero >>= INTVAL (XEXP (x, 1));
8383 	    }
8384 
8385 	  if ((mask & ~nonzero) == 0)
8386 	    {
8387 	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8388 					XEXP (x, 0), INTVAL (XEXP (x, 1)));
8389 	      if (GET_CODE (x) != ASHIFTRT)
8390 		return force_to_mode (x, mode, mask, next_select);
8391 	    }
8392 
8393 	  else if ((i = exact_log2 (mask)) >= 0)
8394 	    {
8395 	      x = simplify_shift_const
8396 		  (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8397 		   GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8398 
8399 	      if (GET_CODE (x) != ASHIFTRT)
8400 		return force_to_mode (x, mode, mask, next_select);
8401 	    }
8402 	}
8403 
8404       /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
8405 	 even if the shift count isn't a constant.  */
8406       if (mask == 1)
8407 	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8408 				 XEXP (x, 0), XEXP (x, 1));
8409 
8410     shiftrt:
8411 
8412       /* If this is a zero- or sign-extension operation that just affects bits
8413 	 we don't care about, remove it.  Be sure the call above returned
8414 	 something that is still a shift.  */
8415 
8416       if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8417 	  && CONST_INT_P (XEXP (x, 1))
8418 	  && INTVAL (XEXP (x, 1)) >= 0
8419 	  && (INTVAL (XEXP (x, 1))
8420 	      <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8421 	  && GET_CODE (XEXP (x, 0)) == ASHIFT
8422 	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8423 	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8424 			      next_select);
8425 
8426       break;
8427 
8428     case ROTATE:
8429     case ROTATERT:
8430       /* If the shift count is constant and we can do computations
8431 	 in the mode of X, compute where the bits we care about are.
8432 	 Otherwise, we can't do anything.  Don't change the mode of
8433 	 the shift or propagate MODE into the shift, though.  */
8434       if (CONST_INT_P (XEXP (x, 1))
8435 	  && INTVAL (XEXP (x, 1)) >= 0)
8436 	{
8437 	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8438 					    GET_MODE (x), GEN_INT (mask),
8439 					    XEXP (x, 1));
8440 	  if (temp && CONST_INT_P (temp))
8441 	    x = simplify_gen_binary (code, GET_MODE (x),
8442 				     force_to_mode (XEXP (x, 0), GET_MODE (x),
8443 						    INTVAL (temp), next_select),
8444 				     XEXP (x, 1));
8445 	}
8446       break;
8447 
8448     case NEG:
8449       /* If we just want the low-order bit, the NEG isn't needed since it
8450 	 won't change the low-order bit.  */
8451       if (mask == 1)
8452 	return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8453 
8454       /* We need any bits less significant than the most significant bit in
8455 	 MASK since carries from those bits will affect the bits we are
8456 	 interested in.  */
8457       mask = fuller_mask;
8458       goto unop;
8459 
8460     case NOT:
8461       /* (not FOO) is (xor FOO CONST), where CONST is the all-ones constant,
8462 	 so if FOO is an LSHIFTRT we can do the same as the XOR case above.
8463 	 Ensure that the constant we form is not wider than the mode of X.  */
8464 
8465       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8466 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8467 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8468 	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8469 	      < GET_MODE_PRECISION (GET_MODE (x)))
8470 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8471 	{
8472 	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8473 			       GET_MODE (x));
8474 	  temp = simplify_gen_binary (XOR, GET_MODE (x),
8475 				      XEXP (XEXP (x, 0), 0), temp);
8476 	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8477 				   temp, XEXP (XEXP (x, 0), 1));
8478 
8479 	  return force_to_mode (x, mode, mask, next_select);
8480 	}
8481 
8482       /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8483 	 use the full mask inside the NOT.  */
8484       mask = fuller_mask;
8485 
8486     unop:
8487       op0 = gen_lowpart_or_truncate (op_mode,
8488 				     force_to_mode (XEXP (x, 0), mode, mask,
8489 						    next_select));
8490       if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8491 	x = simplify_gen_unary (code, op_mode, op0, op_mode);
8492       break;
8493 
8494     case NE:
8495       /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8496 	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8497 	 which is equal to STORE_FLAG_VALUE.  */
8498       if ((mask & ~STORE_FLAG_VALUE) == 0
8499 	  && XEXP (x, 1) == const0_rtx
8500 	  && GET_MODE (XEXP (x, 0)) == mode
8501 	  && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8502 	  && (nonzero_bits (XEXP (x, 0), mode)
8503 	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8504 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8505 
8506       break;
8507 
8508     case IF_THEN_ELSE:
8509       /* We have no way of knowing if the IF_THEN_ELSE can itself be
8510 	 written in a narrower mode.  We play it safe and do not do so.  */
8511 
8512       op0 = gen_lowpart_or_truncate (GET_MODE (x),
8513 				     force_to_mode (XEXP (x, 1), mode,
8514 						    mask, next_select));
8515       op1 = gen_lowpart_or_truncate (GET_MODE (x),
8516 				     force_to_mode (XEXP (x, 2), mode,
8517 						    mask, next_select));
8518       if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
8519 	x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
8520 				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
8521 				  op0, op1);
8522       break;
8523 
8524     default:
8525       break;
8526     }
8527 
8528   /* Ensure we return a value of the proper mode.  */
8529   return gen_lowpart_or_truncate (mode, x);
8530 }
8531 
8532 /* Return nonzero if X is an expression that has one of two values depending on
8533    whether some other value is zero or nonzero.  In that case, we return the
8534    value that is being tested, *PTRUE is set to the value if the rtx being
8535    returned has a nonzero value, and *PFALSE is set to the other alternative.
8536 
8537    If we return zero, we set *PTRUE and *PFALSE to X.  */
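/* For example, if X is (ne A (const_int 0)), we return A, with
   *PTRUE set to const_true_rtx and *PFALSE set to const0_rtx.  */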
8538 
8539 static rtx
8540 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8541 {
8542   enum machine_mode mode = GET_MODE (x);
8543   enum rtx_code code = GET_CODE (x);
8544   rtx cond0, cond1, true0, true1, false0, false1;
8545   unsigned HOST_WIDE_INT nz;
8546 
8547   /* If we are comparing a value against zero, we are done.  */
8548   if ((code == NE || code == EQ)
8549       && XEXP (x, 1) == const0_rtx)
8550     {
8551       *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8552       *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8553       return XEXP (x, 0);
8554     }
8555 
8556   /* If this is a unary operation whose operand has one of two values, apply
8557      our opcode to compute those values.  */
8558   else if (UNARY_P (x)
8559 	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8560     {
8561       *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8562       *pfalse = simplify_gen_unary (code, mode, false0,
8563 				    GET_MODE (XEXP (x, 0)));
8564       return cond0;
8565     }
8566 
8567   /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8568      make can't possibly match and would suppress other optimizations.  */
8569   else if (code == COMPARE)
8570     ;
8571 
8572   /* If this is a binary operation, see if either side has only one of two
8573      values.  If either one does or if both do and they are conditional on
8574      the same value, compute the new true and false values.  */
8575   else if (BINARY_P (x))
8576     {
8577       cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8578       cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8579 
8580       if ((cond0 != 0 || cond1 != 0)
8581 	  && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8582 	{
8583 	  /* If if_then_else_cond returned zero, then true/false are the
8584 	     same rtl.  We must copy one of them to prevent invalid rtl
8585 	     sharing.  */
8586 	  if (cond0 == 0)
8587 	    true0 = copy_rtx (true0);
8588 	  else if (cond1 == 0)
8589 	    true1 = copy_rtx (true1);
8590 
8591 	  if (COMPARISON_P (x))
8592 	    {
8593 	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8594 						true0, true1);
8595 	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8596 						 false0, false1);
8597 	    }
8598 	  else
8599 	    {
8600 	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
8601 	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
8602 	    }
8603 
8604 	  return cond0 ? cond0 : cond1;
8605 	}
8606 
8607       /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8608 	 operands is zero when the other is nonzero, and vice-versa,
8609 	 and STORE_FLAG_VALUE is 1 or -1.  */
8610 
8611       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8612 	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
8613 	      || code == UMAX)
8614 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8615 	{
8616 	  rtx op0 = XEXP (XEXP (x, 0), 1);
8617 	  rtx op1 = XEXP (XEXP (x, 1), 1);
8618 
8619 	  cond0 = XEXP (XEXP (x, 0), 0);
8620 	  cond1 = XEXP (XEXP (x, 1), 0);
8621 
8622 	  if (COMPARISON_P (cond0)
8623 	      && COMPARISON_P (cond1)
8624 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8625 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8626 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8627 		  || ((swap_condition (GET_CODE (cond0))
8628 		       == reversed_comparison_code (cond1, NULL))
8629 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8630 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8631 	      && ! side_effects_p (x))
8632 	    {
8633 	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8634 	      *pfalse = simplify_gen_binary (MULT, mode,
8635 					     (code == MINUS
8636 					      ? simplify_gen_unary (NEG, mode,
8637 								    op1, mode)
8638 					      : op1),
8639 					      const_true_rtx);
8640 	      return cond0;
8641 	    }
8642 	}
8643 
8644       /* Similarly for MULT, AND and UMIN, except that for these the result
8645 	 is always zero.  */
8646       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8647 	  && (code == MULT || code == AND || code == UMIN)
8648 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8649 	{
8650 	  cond0 = XEXP (XEXP (x, 0), 0);
8651 	  cond1 = XEXP (XEXP (x, 1), 0);
8652 
8653 	  if (COMPARISON_P (cond0)
8654 	      && COMPARISON_P (cond1)
8655 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8656 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8657 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8658 		  || ((swap_condition (GET_CODE (cond0))
8659 		       == reversed_comparison_code (cond1, NULL))
8660 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8661 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8662 	      && ! side_effects_p (x))
8663 	    {
8664 	      *ptrue = *pfalse = const0_rtx;
8665 	      return cond0;
8666 	    }
8667 	}
8668     }
8669 
8670   else if (code == IF_THEN_ELSE)
8671     {
8672       /* If we have IF_THEN_ELSE already, extract the condition and
8673 	 canonicalize it if it is NE or EQ.  */
8674       cond0 = XEXP (x, 0);
8675       *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8676       if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8677 	return XEXP (cond0, 0);
8678       else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8679 	{
8680 	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8681 	  return XEXP (cond0, 0);
8682 	}
8683       else
8684 	return cond0;
8685     }
8686 
8687   /* If X is a SUBREG, we can narrow both the true and false values
8688      of the inner expression, if there is a condition.  */
8689   else if (code == SUBREG
8690 	   && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8691 					       &true0, &false0)))
8692     {
8693       true0 = simplify_gen_subreg (mode, true0,
8694 				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8695       false0 = simplify_gen_subreg (mode, false0,
8696 				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8697       if (true0 && false0)
8698 	{
8699 	  *ptrue = true0;
8700 	  *pfalse = false0;
8701 	  return cond0;
8702 	}
8703     }
8704 
8705   /* If X is a constant, this isn't special and will cause confusion
8706      if we treat it as such.  Likewise if it is equivalent to a constant.  */
8707   else if (CONSTANT_P (x)
8708 	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
8709     ;
8710 
8711   /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
8712      will be least confusing to the rest of the compiler.  */
8713   else if (mode == BImode)
8714     {
8715       *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
8716       return x;
8717     }
8718 
8719   /* If X is known to be either 0 or -1, those are the true and
8720      false values when testing X.  */
8721   else if (x == constm1_rtx || x == const0_rtx
8722 	   || (mode != VOIDmode
8723 	       && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
8724     {
8725       *ptrue = constm1_rtx, *pfalse = const0_rtx;
8726       return x;
8727     }
8728 
8729   /* Likewise for 0 or a single bit.  */
8730   else if (HWI_COMPUTABLE_MODE_P (mode)
8731 	   && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
8732     {
8733       *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
8734       return x;
8735     }
8736 
8737   /* Otherwise fail; show no condition with true and false values the same.  */
8738   *ptrue = *pfalse = x;
8739   return 0;
8740 }
8741 
8742 /* Return the value of expression X given the fact that condition COND
8743    is known to be true when applied to REG as its first operand and VAL
8744    as its second.  X is known to not be shared and so can be modified in
8745    place.
8746 
8747    We only handle the simplest cases, and specifically those cases that
8748    arise with IF_THEN_ELSE expressions.  */
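/* For example, if COND is GE, REG is A and VAL is (const_int 0),
   then an (abs A) within X simplifies to just A.  */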
8749 
8750 static rtx
8751 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
8752 {
8753   enum rtx_code code = GET_CODE (x);
8754   rtx temp;
8755   const char *fmt;
8756   int i, j;
8757 
8758   if (side_effects_p (x))
8759     return x;
8760 
8761   /* If either operand of the condition is a floating point value,
8762      then we have to avoid collapsing an EQ comparison.  */
8763   if (cond == EQ
8764       && rtx_equal_p (x, reg)
8765       && ! FLOAT_MODE_P (GET_MODE (x))
8766       && ! FLOAT_MODE_P (GET_MODE (val)))
8767     return val;
8768 
8769   if (cond == UNEQ && rtx_equal_p (x, reg))
8770     return val;
8771 
8772   /* If X is (abs REG) and we know something about REG's relationship
8773      with zero, we may be able to simplify this.  */
8774 
8775   if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
8776     switch (cond)
8777       {
8778       case GE:  case GT:  case EQ:
8779 	return XEXP (x, 0);
8780       case LT:  case LE:
8781 	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
8782 				   XEXP (x, 0),
8783 				   GET_MODE (XEXP (x, 0)));
8784       default:
8785 	break;
8786       }
8787 
8788   /* The only other cases we handle are MIN, MAX, and comparisons if the
8789      operands are the same as REG and VAL.  */
8790 
8791   else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
8792     {
8793       if (rtx_equal_p (XEXP (x, 0), val))
8794 	cond = swap_condition (cond), temp = val, val = reg, reg = temp;
8795 
8796       if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
8797 	{
8798 	  if (COMPARISON_P (x))
8799 	    {
8800 	      if (comparison_dominates_p (cond, code))
8801 		return const_true_rtx;
8802 
8803 	      code = reversed_comparison_code (x, NULL);
8804 	      if (code != UNKNOWN
8805 		  && comparison_dominates_p (cond, code))
8806 		return const0_rtx;
8807 	      else
8808 		return x;
8809 	    }
8810 	  else if (code == SMAX || code == SMIN
8811 		   || code == UMIN || code == UMAX)
8812 	    {
8813 	      int unsignedp = (code == UMIN || code == UMAX);
8814 
8815 	      /* Do not reverse the condition when it is NE or EQ.
8816 		 This is because we cannot conclude anything about
8817 		 the value of 'SMAX (x, y)' when x is not equal to y,
8818 		 but we can when x equals y.  */
8819 	      if ((code == SMAX || code == UMAX)
8820 		  && ! (cond == EQ || cond == NE))
8821 		cond = reverse_condition (cond);
8822 
8823 	      switch (cond)
8824 		{
8825 		case GE:   case GT:
8826 		  return unsignedp ? x : XEXP (x, 1);
8827 		case LE:   case LT:
8828 		  return unsignedp ? x : XEXP (x, 0);
8829 		case GEU:  case GTU:
8830 		  return unsignedp ? XEXP (x, 1) : x;
8831 		case LEU:  case LTU:
8832 		  return unsignedp ? XEXP (x, 0) : x;
8833 		default:
8834 		  break;
8835 		}
8836 	    }
8837 	}
8838     }
8839   else if (code == SUBREG)
8840     {
8841       enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
8842       rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
8843 
8844       if (SUBREG_REG (x) != r)
8845 	{
8846 	  /* We must simplify subreg here, before we lose track of the
8847 	     original inner_mode.  */
8848 	  new_rtx = simplify_subreg (GET_MODE (x), r,
8849 				 inner_mode, SUBREG_BYTE (x));
8850 	  if (new_rtx)
8851 	    return new_rtx;
8852 	  else
8853 	    SUBST (SUBREG_REG (x), r);
8854 	}
8855 
8856       return x;
8857     }
8858   /* We don't have to handle SIGN_EXTEND here, because even in the
8859      case of replacing something with a modeless CONST_INT, a
8860      CONST_INT is already (supposed to be) a valid sign extension for
8861      its narrower mode, which implies it's already properly
8862      sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
8863      story is different.  */
8864   else if (code == ZERO_EXTEND)
8865     {
8866       enum machine_mode inner_mode = GET_MODE (XEXP (x, 0));
8867       rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
8868 
8869       if (XEXP (x, 0) != r)
8870 	{
8871 	  /* We must simplify the zero_extend here, before we lose
8872 	     track of the original inner_mode.  */
8873 	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
8874 					  r, inner_mode);
8875 	  if (new_rtx)
8876 	    return new_rtx;
8877 	  else
8878 	    SUBST (XEXP (x, 0), r);
8879 	}
8880 
8881       return x;
8882     }
8883 
8884   fmt = GET_RTX_FORMAT (code);
8885   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8886     {
8887       if (fmt[i] == 'e')
8888 	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
8889       else if (fmt[i] == 'E')
8890 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8891 	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
8892 						cond, reg, val));
8893     }
8894 
8895   return x;
8896 }
8897 
8898 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
8899    assignment as a field assignment.  */
8900 
8901 static int
8902 rtx_equal_for_field_assignment_p (rtx x, rtx y)
8903 {
8904   if (x == y || rtx_equal_p (x, y))
8905     return 1;
8906 
8907   if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
8908     return 0;
8909 
8910   /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
8911      Note that all SUBREGs of MEM are paradoxical; otherwise they
8912      would have been rewritten.  */
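  /* E.g. X = (mem:DI A) matches Y = (subreg:DI (mem:SI A) 0) when
     (mem:SI A) is the low part of (mem:DI A).  */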
8913   if (MEM_P (x) && GET_CODE (y) == SUBREG
8914       && MEM_P (SUBREG_REG (y))
8915       && rtx_equal_p (SUBREG_REG (y),
8916 		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
8917     return 1;
8918 
8919   if (MEM_P (y) && GET_CODE (x) == SUBREG
8920       && MEM_P (SUBREG_REG (x))
8921       && rtx_equal_p (SUBREG_REG (x),
8922 		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
8923     return 1;
8924 
8925   /* We used to see if get_last_value of X and Y were the same but that's
8926      not correct.  In one direction, we'll cause the assignment to have
8927      the wrong destination and in the other, we'll import a register into
8928      this insn that might already have been dead.  So fail if none of the
8929      above cases are true.  */
8930   return 0;
8931 }
8932 
8933 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
8934    Return that assignment if so.
8935 
8936    We only handle the most common cases.  */
8937 
8938 static rtx
8939 make_field_assignment (rtx x)
8940 {
8941   rtx dest = SET_DEST (x);
8942   rtx src = SET_SRC (x);
8943   rtx assign;
8944   rtx rhs, lhs;
8945   HOST_WIDE_INT c1;
8946   HOST_WIDE_INT pos;
8947   unsigned HOST_WIDE_INT len;
8948   rtx other;
8949   enum machine_mode mode;
8950 
8951   /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
8952      a clear of a one-bit field.  We will have changed it to
8953      (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
8954      for a SUBREG.  */
8955 
8956   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
8957       && CONST_INT_P (XEXP (XEXP (src, 0), 0))
8958       && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
8959       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8960     {
8961       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8962 				1, 1, 1, 0);
8963       if (assign != 0)
8964 	return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8965       return x;
8966     }
8967 
8968   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
8969       && subreg_lowpart_p (XEXP (src, 0))
8970       && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
8971 	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
8972       && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
8973       && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
8974       && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
8975       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8976     {
8977       assign = make_extraction (VOIDmode, dest, 0,
8978 				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
8979 				1, 1, 1, 0);
8980       if (assign != 0)
8981 	return gen_rtx_SET (VOIDmode, assign, const0_rtx);
8982       return x;
8983     }
8984 
8985   /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
8986      one-bit field.  */
8987   if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
8988       && XEXP (XEXP (src, 0), 0) == const1_rtx
8989       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
8990     {
8991       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
8992 				1, 1, 1, 0);
8993       if (assign != 0)
8994 	return gen_rtx_SET (VOIDmode, assign, const1_rtx);
8995       return x;
8996     }
8997 
8998   /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
8999      SRC is an AND with all bits of that field set, then we can discard
9000      the AND.  */
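  /* E.g. in (set (zero_extract:SI X (const_int 8) (const_int 0))
     (and:SI Y (const_int 255))), the AND is redundant, since only the
     low 8 bits of Y are stored anyway.  */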
9001   if (GET_CODE (dest) == ZERO_EXTRACT
9002       && CONST_INT_P (XEXP (dest, 1))
9003       && GET_CODE (src) == AND
9004       && CONST_INT_P (XEXP (src, 1)))
9005     {
9006       HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9007       unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9008       unsigned HOST_WIDE_INT ze_mask;
9009 
9010       if (width >= HOST_BITS_PER_WIDE_INT)
9011 	ze_mask = -1;
9012       else
9013 	ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9014 
9015       /* Complete overlap.  We can remove the source AND.  */
9016       if ((and_mask & ze_mask) == ze_mask)
9017 	return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));
9018 
9019       /* Partial overlap.  We can reduce the source AND.  */
9020       if ((and_mask & ze_mask) != and_mask)
9021 	{
9022 	  mode = GET_MODE (src);
9023 	  src = gen_rtx_AND (mode, XEXP (src, 0),
9024 			     gen_int_mode (and_mask & ze_mask, mode));
9025 	  return gen_rtx_SET (VOIDmode, dest, src);
9026 	}
9027     }
9028 
9029   /* The other case we handle is assignments into a constant-position
9030      field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
9031      a mask that has all one bits except for a group of zero bits and
9032      OTHER is known to have zeros where C1 has ones, this is such an
9033      assignment.  Compute the position and length from C1.  Shift OTHER
9034      to the appropriate position, force it to the required mode, and
9035      make the extraction.  Check for the AND in both operands.  */
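  /* For instance, with SImode DEST, (ior (and DEST (const_int -241))
     (ashift VAL (const_int 4))) assigns VAL to bits <7:4>: C1 is
     0xffffff0f, so POS is 4 and LEN is 4, provided the nonzero bits
     of the ASHIFT all lie within 0xf0.  */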
9036 
9037   if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9038     return x;
9039 
9040   rhs = expand_compound_operation (XEXP (src, 0));
9041   lhs = expand_compound_operation (XEXP (src, 1));
9042 
9043   if (GET_CODE (rhs) == AND
9044       && CONST_INT_P (XEXP (rhs, 1))
9045       && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9046     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9047   else if (GET_CODE (lhs) == AND
9048 	   && CONST_INT_P (XEXP (lhs, 1))
9049 	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9050     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9051   else
9052     return x;
9053 
9054   pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9055   if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9056       || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9057       || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9058     return x;
9059 
9060   assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9061   if (assign == 0)
9062     return x;
9063 
9064   /* The mode to use for the source is the mode of the assignment, or of
9065      what is inside a possible STRICT_LOW_PART.  */
9066   mode = (GET_CODE (assign) == STRICT_LOW_PART
9067 	  ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9068 
9069   /* Shift OTHER right POS places and make it the source, restricting it
9070      to the proper length and mode.  */
9071 
9072   src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9073 						     GET_MODE (src),
9074 						     other, pos),
9075 			       dest);
9076   src = force_to_mode (src, mode,
9077 		       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9078 		       ? ~(unsigned HOST_WIDE_INT) 0
9079 		       : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9080 		       0);
9081 
9082   /* If SRC is masked by an AND that does not make a difference in
9083      the value being stored, strip it.  */
9084   if (GET_CODE (assign) == ZERO_EXTRACT
9085       && CONST_INT_P (XEXP (assign, 1))
9086       && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9087       && GET_CODE (src) == AND
9088       && CONST_INT_P (XEXP (src, 1))
9089       && UINTVAL (XEXP (src, 1))
9090 	 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9091     src = XEXP (src, 0);
9092 
9093   return gen_rtx_SET (VOIDmode, assign, src);
9094 }
9095 
9096 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9097    if so.  */
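/* E.g. (ior (and A C) (and B C)) becomes (and (ior A B) C), and
   (plus (mult A C) (mult B C)) becomes (mult (plus A B) C).  */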
9098 
9099 static rtx
9100 apply_distributive_law (rtx x)
9101 {
9102   enum rtx_code code = GET_CODE (x);
9103   enum rtx_code inner_code;
9104   rtx lhs, rhs, other;
9105   rtx tem;
9106 
9107   /* Distributivity does not hold for floating point, since applying it
9108      can change the value.  So don't do it unless -funsafe-math-optimizations.  */
9109   if (FLOAT_MODE_P (GET_MODE (x))
9110       && ! flag_unsafe_math_optimizations)
9111     return x;
9112 
9113   /* The outer operation can only be one of the following:  */
9114   if (code != IOR && code != AND && code != XOR
9115       && code != PLUS && code != MINUS)
9116     return x;
9117 
9118   lhs = XEXP (x, 0);
9119   rhs = XEXP (x, 1);
9120 
9121   /* If either operand is a primitive we can't do anything, so get out
9122      fast.  */
9123   if (OBJECT_P (lhs) || OBJECT_P (rhs))
9124     return x;
9125 
9126   lhs = expand_compound_operation (lhs);
9127   rhs = expand_compound_operation (rhs);
9128   inner_code = GET_CODE (lhs);
9129   if (inner_code != GET_CODE (rhs))
9130     return x;
9131 
9132   /* See if the inner and outer operations distribute.  */
9133   switch (inner_code)
9134     {
9135     case LSHIFTRT:
9136     case ASHIFTRT:
9137     case AND:
9138     case IOR:
9139       /* These all distribute except over PLUS.  */
9140       if (code == PLUS || code == MINUS)
9141 	return x;
9142       break;
9143 
9144     case MULT:
9145       if (code != PLUS && code != MINUS)
9146 	return x;
9147       break;
9148 
9149     case ASHIFT:
9150       /* This is also a multiply, so it distributes over everything.  */
9151       break;
9152 
9153     /* This used to handle SUBREG, but this turned out to be counter-
9154        productive, since (subreg (op ...)) usually is not handled by
9155        insn patterns, and this "optimization" therefore transformed
9156        recognizable patterns into unrecognizable ones.  Therefore the
9157        SUBREG case was removed from here.
9158 
9159        It is possible that distributing SUBREG over arithmetic operations
9160        leads to an intermediate result that can then be optimized further,
9161        e.g. by moving the outer SUBREG to the other side of a SET as done
9162        in simplify_set.  This seems to have been the original intent of
9163        handling SUBREGs here.
9164 
9165        However, with current GCC this does not appear to actually happen,
9166        at least on major platforms.  If some case is found where removing
9167        the SUBREG case here prevents follow-on optimizations, distributing
9168        SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */
9169 
9170     default:
9171       return x;
9172     }
9173 
9174   /* Set LHS and RHS to the inner operands (A and B in the example
9175      above) and set OTHER to the common operand (C in the example).
9176      There is only one way to do this unless the inner operation is
9177      commutative.  */
9178   if (COMMUTATIVE_ARITH_P (lhs)
9179       && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9180     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9181   else if (COMMUTATIVE_ARITH_P (lhs)
9182 	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9183     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9184   else if (COMMUTATIVE_ARITH_P (lhs)
9185 	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9186     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9187   else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9188     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9189   else
9190     return x;
9191 
9192   /* Form the new inner operation, seeing if it simplifies first.  */
9193   tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9194 
9195   /* There is one exception to the general way of distributing:
9196      (a | c) ^ (b | c) -> (a ^ b) & ~c  */
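  /* (Wherever c has a 1 bit, both IORs yield 1 and the XOR gives 0,
     matching the AND with ~c; wherever c is 0, the XOR is just a ^ b.)  */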
9197   if (code == XOR && inner_code == IOR)
9198     {
9199       inner_code = AND;
9200       other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9201     }
9202 
9203   /* We may be able to continue distributing the result, so call
9204      ourselves recursively on the inner operation before forming the
9205      outer operation, which we return.  */
9206   return simplify_gen_binary (inner_code, GET_MODE (x),
9207 			      apply_distributive_law (tem), other);
9208 }
9209 
9210 /* See if X is of the form (* (+ A B) C), and if so convert to
9211    (+ (* A C) (* B C)) and try to simplify.
9212 
9213    Most of the time, this results in no change.  However, if some of
9214    the operands are the same or inverses of each other, simplifications
9215    will result.
9216 
9217    For example, (and (ior A B) (not B)) can occur as the result of
9218    expanding a bit field assignment.  When we apply the distributive
9219    law to this, we get (ior (and A (not B)) (and B (not B))),
9220    which then simplifies to (and A (not B)).
9221 
9222    Note that no checks happen on the validity of applying the inverse
9223    distributive law.  Checking here would be pointless, since the few
9224    places where this routine is called can verify it themselves.
9225 
9226    N is the index of the term that is decomposed (the arithmetic operation,
9227    i.e. (+ A B) in the first example above).  !N is the index of the term that
9228    is distributed, i.e. of C in the first example above.  */
9229 static rtx
9230 distribute_and_simplify_rtx (rtx x, int n)
9231 {
9232   enum machine_mode mode;
9233   enum rtx_code outer_code, inner_code;
9234   rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9235 
9236   /* Distributivity does not hold for floating point, since applying it
9237      can change the value.  So don't do it unless -funsafe-math-optimizations.  */
9238   if (FLOAT_MODE_P (GET_MODE (x))
9239       && ! flag_unsafe_math_optimizations)
9240     return NULL_RTX;
9241 
9242   decomposed = XEXP (x, n);
9243   if (!ARITHMETIC_P (decomposed))
9244     return NULL_RTX;
9245 
9246   mode = GET_MODE (x);
9247   outer_code = GET_CODE (x);
9248   distributed = XEXP (x, !n);
9249 
9250   inner_code = GET_CODE (decomposed);
9251   inner_op0 = XEXP (decomposed, 0);
9252   inner_op1 = XEXP (decomposed, 1);
9253 
9254   /* Special case (and (xor B C) (not A)), which is equivalent to
9255      (xor (ior A B) (ior A C))  */
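  /* (When A is 1, both IORs are 1 and their XOR is 0; when A is 0,
     the XOR is B ^ C.  That is exactly (and (xor B C) (not A)).)  */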
9256   if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9257     {
9258       distributed = XEXP (distributed, 0);
9259       outer_code = IOR;
9260     }
9261 
9262   if (n == 0)
9263     {
9264       /* Distribute the second term.  */
9265       new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9266       new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9267     }
9268   else
9269     {
9270       /* Distribute the first term.  */
9271       new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9272       new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9273     }
9274 
9275   tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9276 						     new_op0, new_op1));
9277   if (GET_CODE (tmp) != outer_code
9278       && (set_src_cost (tmp, optimize_this_for_speed_p)
9279 	  < set_src_cost (x, optimize_this_for_speed_p)))
9280     return tmp;
9281 
9282   return NULL_RTX;
9283 }
9284 
9285 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9286    in MODE.  Return an equivalent form, if different from (and VAROP
9287    (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
9288 
9289 static rtx
9290 simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
9291 			  unsigned HOST_WIDE_INT constop)
9292 {
9293   unsigned HOST_WIDE_INT nonzero;
9294   unsigned HOST_WIDE_INT orig_constop;
9295   rtx orig_varop;
9296   int i;
9297 
9298   orig_varop = varop;
9299   orig_constop = constop;
9300   if (GET_CODE (varop) == CLOBBER)
9301     return NULL_RTX;
9302 
9303   /* Simplify VAROP knowing that we will be only looking at some of the
9304      bits in it.
9305 
9306      Note by passing in CONSTOP, we guarantee that the bits not set in
9307      CONSTOP are not significant and will never be examined.  We must
9308      ensure that is the case by explicitly masking out those bits
9309      before returning.  */
9310   varop = force_to_mode (varop, mode, constop, 0);
9311 
9312   /* If VAROP is a CLOBBER, we will fail so return it.  */
9313   if (GET_CODE (varop) == CLOBBER)
9314     return varop;
9315 
9316   /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9317      to VAROP and return the new constant.  */
9318   if (CONST_INT_P (varop))
9319     return gen_int_mode (INTVAL (varop) & constop, mode);
9320 
9321   /* See what bits may be nonzero in VAROP.  Unlike the general case of
9322      a call to nonzero_bits, here we don't care about bits outside
9323      MODE.  */
9324 
9325   nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9326 
9327   /* Turn off all bits in the constant that are known to already be zero.
9328      Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9329      which is tested below.  */
9330 
9331   constop &= nonzero;
9332 
9333   /* If we don't have any bits left, return zero.  */
9334   if (constop == 0)
9335     return const0_rtx;
9336 
9337   /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9338      a power of two, we can replace this with an ASHIFT.  */
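  /* E.g. if X is known to be 0 or 1, (and (neg X) (const_int 4))
     becomes (ashift X (const_int 2)).  */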
9339   if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9340       && (i = exact_log2 (constop)) >= 0)
9341     return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9342 
9343   /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9344      or XOR, then try to apply the distributive law.  This may eliminate
9345      operations if either branch can be simplified because of the AND.
9346      It may also make some cases more complex, but those cases probably
9347      won't match a pattern either with or without this.  */
9348 
9349   if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9350     return
9351       gen_lowpart
9352 	(mode,
9353 	 apply_distributive_law
9354 	 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9355 			       simplify_and_const_int (NULL_RTX,
9356 						       GET_MODE (varop),
9357 						       XEXP (varop, 0),
9358 						       constop),
9359 			       simplify_and_const_int (NULL_RTX,
9360 						       GET_MODE (varop),
9361 						       XEXP (varop, 1),
9362 						       constop))));
9363 
9364   /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9365      the AND and see if one of the operands simplifies to zero.  If so, we
9366      may eliminate it.  */
9367 
9368   if (GET_CODE (varop) == PLUS
9369       && exact_log2 (constop + 1) >= 0)
9370     {
9371       rtx o0, o1;
9372 
9373       o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9374       o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9375       if (o0 == const0_rtx)
9376 	return o1;
9377       if (o1 == const0_rtx)
9378 	return o0;
9379     }
9380 
9381   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
9382   varop = gen_lowpart (mode, varop);
9383   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9384     return NULL_RTX;
9385 
9386   /* If we are only masking insignificant bits, return VAROP.  */
9387   if (constop == nonzero)
9388     return varop;
9389 
9390   if (varop == orig_varop && constop == orig_constop)
9391     return NULL_RTX;
9392 
9393   /* Otherwise, return an AND.  */
9394   return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9395 }
9396 
9397 
9398 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9399    in MODE.
9400 
9401    Return an equivalent form, if different from X.  Otherwise, return X.  If
9402    X is zero, always construct the equivalent form.  */
9403 
9404 static rtx
9405 simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
9406 			unsigned HOST_WIDE_INT constop)
9407 {
9408   rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9409   if (tem)
9410     return tem;
9411 
9412   if (!x)
9413     x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9414 			     gen_int_mode (constop, mode));
9415   if (GET_MODE (x) != mode)
9416     x = gen_lowpart (mode, x);
9417   return x;
9418 }
9419 
9420 /* Given a REG, X, compute which bits in X can be nonzero.
9421    We don't care about bits outside of those defined in MODE.
9422 
9423    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X was
9424    last set from a shift, AND, or zero_extract, we can do better.  */
9425 
9426 static rtx
9427 reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
9428 			      const_rtx known_x ATTRIBUTE_UNUSED,
9429 			      enum machine_mode known_mode ATTRIBUTE_UNUSED,
9430 			      unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9431 			      unsigned HOST_WIDE_INT *nonzero)
9432 {
9433   rtx tem;
9434   reg_stat_type *rsp;
9435 
9436   /* If X is a register whose nonzero bits value is current, use it.
9437      Otherwise, if X is a register whose value we can find, use that
9438      value.  Otherwise, use the previously-computed global nonzero bits
9439      for this register.  */
9440 
9441   rsp = &reg_stat[REGNO (x)];
9442   if (rsp->last_set_value != 0
9443       && (rsp->last_set_mode == mode
9444 	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9445 	      && GET_MODE_CLASS (mode) == MODE_INT))
9446       && ((rsp->last_set_label >= label_tick_ebb_start
9447 	   && rsp->last_set_label < label_tick)
9448 	  || (rsp->last_set_label == label_tick
9449               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9450 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9451 	      && REG_N_SETS (REGNO (x)) == 1
9452 	      && !REGNO_REG_SET_P
9453 	          (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9454     {
9455       *nonzero &= rsp->last_set_nonzero_bits;
9456       return NULL;
9457     }
9458 
9459   tem = get_last_value (x);
9460 
9461   if (tem)
9462     {
9463 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
9464       /* If X is narrower than MODE and TEM is a non-negative
9465 	 constant that would appear negative in the mode of X,
9466 	 sign-extend it for use in reg_nonzero_bits because some
9467 	 machines (maybe most) will actually do the sign-extension
9468 	 and this is the conservative approach.
9469 
9470 	 ??? For 2.5, try to tighten up the MD files in this regard
9471 	 instead of this kludge.  */
9472 
9473       if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)
9474 	  && CONST_INT_P (tem)
9475 	  && INTVAL (tem) > 0
9476 	  && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem)))
9477 	tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x)));
9478 #endif
9479       return tem;
9480     }
9481   else if (nonzero_sign_valid && rsp->nonzero_bits)
9482     {
9483       unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9484 
9485       if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
9486 	/* We don't know anything about the upper bits.  */
9487 	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9488       *nonzero &= mask;
9489     }
9490 
9491   return NULL;
9492 }
9493 
9494 /* Record in *RESULT the number of bits at the high-order end of X that
9495    are known to equal the sign bit when X is used in mode MODE (or in its
9496    own mode if MODE is VOIDmode); the value is between 1 and the number of
9497    bits in MODE.  Return NULL, or an rtx to analyze in place of X.  */
9498 
9499 static rtx
9500 reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
9501 				     const_rtx known_x ATTRIBUTE_UNUSED,
9502 				     enum machine_mode known_mode
9503 				     ATTRIBUTE_UNUSED,
9504 				     unsigned int known_ret ATTRIBUTE_UNUSED,
9505 				     unsigned int *result)
9506 {
9507   rtx tem;
9508   reg_stat_type *rsp;
9509 
9510   rsp = &reg_stat[REGNO (x)];
9511   if (rsp->last_set_value != 0
9512       && rsp->last_set_mode == mode
9513       && ((rsp->last_set_label >= label_tick_ebb_start
9514 	   && rsp->last_set_label < label_tick)
9515 	  || (rsp->last_set_label == label_tick
9516               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9517 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9518 	      && REG_N_SETS (REGNO (x)) == 1
9519 	      && !REGNO_REG_SET_P
9520 	          (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
9521     {
9522       *result = rsp->last_set_sign_bit_copies;
9523       return NULL;
9524     }
9525 
9526   tem = get_last_value (x);
9527   if (tem != 0)
9528     return tem;
9529 
9530   if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9531       && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
9532     *result = rsp->sign_bit_copies;
9533 
9534   return NULL;
9535 }
9536 
9537 /* Return the number of "extended" bits there are in X, when interpreted
9538    as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
9539    unsigned quantities, this is the number of high-order zero bits.
9540    For signed quantities, this is the number of copies of the sign bit
9541    minus 1.  In both cases, this function returns the number of "spare"
9542    bits.  For example, if two quantities for which this function returns
9543    at least 1 are added, the addition is known not to overflow.
9544 
9545    This function will always return 0 unless called during combine, which
9546    implies that it must be called from a define_split.  */
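/* For example, in a 32-bit mode a signed quantity with three sign bit
   copies lies in [-2**29, 2**29 - 1] and has two "spare" bits, so the
   sum of two such quantities cannot overflow.  */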
9547 
9548 unsigned int
9549 extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
9550 {
9551   if (nonzero_sign_valid == 0)
9552     return 0;
9553 
9554   return (unsignedp
9555 	  ? (HWI_COMPUTABLE_MODE_P (mode)
9556 	     ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
9557 			       - floor_log2 (nonzero_bits (x, mode)))
9558 	     : 0)
9559 	  : num_sign_bit_copies (x, mode) - 1);
9560 }
9561 
9562 /* This function is called from `simplify_shift_const' to merge two
9563    outer operations.  Specifically, we have already found that we need
9564    to perform operation *POP0 with constant *PCONST0 at the outermost
9565    position.  We would now like to also perform OP1 with constant CONST1
9566    (with *POP0 being done last).
9567 
9568    Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9569    the resulting operation.  *PCOMP_P is set to 1 if we would need to
9570    complement the innermost operand, otherwise it is unchanged.
9571 
9572    MODE is the mode in which the operation will be done.  No bits outside
9573    the width of this mode matter.  It is assumed that the width of this mode
9574    is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9575 
9576    If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only
9577    NEG, PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if
9578    the proper result is simply *PCONST0.
9579 
9580    If the resulting operation cannot be expressed as one operation, we
9581    return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
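/* For example, a pending IOR with C0 after an IOR with C1 collapses to
   a single IOR with (C0 | C1); and a pending XOR with C after an AND
   with the same C is (X & C) ^ C == (~X) & C, which is why *PCOMP_P
   may need to be set.  */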
9582 
9583 static int
9584 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
		 enum rtx_code op1, HOST_WIDE_INT const1,
		 enum machine_mode mode, int *pcomp_p)
9585 {
9586   enum rtx_code op0 = *pop0;
9587   HOST_WIDE_INT const0 = *pconst0;
9588 
9589   const0 &= GET_MODE_MASK (mode);
9590   const1 &= GET_MODE_MASK (mode);
9591 
9592   /* If OP0 is an AND, clear unimportant bits in CONST1.  */
9593   if (op0 == AND)
9594     const1 &= const0;
9595 
9596   /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same or
9597      if OP0 is SET.  */
9598 
9599   if (op1 == UNKNOWN || op0 == SET)
9600     return 1;
9601 
9602   else if (op0 == UNKNOWN)
9603     op0 = op1, const0 = const1;
9604 
9605   else if (op0 == op1)
9606     {
9607       switch (op0)
9608 	{
9609 	case AND:
9610 	  const0 &= const1;
9611 	  break;
9612 	case IOR:
9613 	  const0 |= const1;
9614 	  break;
9615 	case XOR:
9616 	  const0 ^= const1;
9617 	  break;
9618 	case PLUS:
9619 	  const0 += const1;
9620 	  break;
9621 	case NEG:
9622 	  op0 = UNKNOWN;
9623 	  break;
9624 	default:
9625 	  break;
9626 	}
9627     }
9628 
9629   /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
9630   else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9631     return 0;
9632 
9633   /* If the two constants aren't the same, we can't do anything.  The
9634      remaining six cases can all be done.  */
9635   else if (const0 != const1)
9636     return 0;
9637 
9638   else
9639     switch (op0)
9640       {
9641       case IOR:
9642 	if (op1 == AND)
9643 	  /* (a & b) | b == b */
9644 	  op0 = SET;
9645 	else /* op1 == XOR */
9646 	  /* (a ^ b) | b == a | b */
9647 	  {;}
9648 	break;
9649 
9650       case XOR:
9651 	if (op1 == AND)
9652 	  /* (a & b) ^ b == (~a) & b */
9653 	  op0 = AND, *pcomp_p = 1;
9654 	else /* op1 == IOR */
9655 	  /* (a | b) ^ b == a & ~b */
9656 	  op0 = AND, const0 = ~const0;
9657 	break;
9658 
9659       case AND:
9660 	if (op1 == IOR)
9661 	  /* (a | b) & b == b */
9662 	  op0 = SET;
9663 	else /* op1 == XOR */
9664 	  /* (a ^ b) & b == (~a) & b */
9665 	  *pcomp_p = 1;
9666 	break;
9667       default:
9668 	break;
9669       }
9670 
9671   /* Check for NO-OP cases.  */
9672   const0 &= GET_MODE_MASK (mode);
9673   if (const0 == 0
9674       && (op0 == IOR || op0 == XOR || op0 == PLUS))
9675     op0 = UNKNOWN;
9676   else if (const0 == 0 && op0 == AND)
9677     op0 = SET;
9678   else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
9679 	   && op0 == AND)
9680     op0 = UNKNOWN;
9681 
9682   *pop0 = op0;
9683 
9684   /* ??? Slightly redundant with the above mask, but not entirely.
9685      Moving this above means we'd have to sign-extend the mode mask
9686      for the final test.  */
9687   if (op0 != UNKNOWN && op0 != NEG)
9688     *pconst0 = trunc_int_for_mode (const0, mode);
9689 
9690   return 1;
9691 }
9692 
9693 /* A helper to simplify_shift_const_1 to determine the mode we can perform
9694    the shift in.  The original shift operation CODE is performed on OP in
9695    ORIG_MODE.  Return the wider mode MODE if we can perform the operation
9696    in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
9697    result of the shift is subject to operation OUTER_CODE with operand
9698    OUTER_CONST.  */
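/* E.g. an LSHIFTRT of a QImode value may be widened to SImode when the
   SImode nonzero bits of OP lie within the QImode mask, since the zeros
   shifted in from the left are then the same in either mode.  */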
9699 
9700 static enum machine_mode
9701 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
9702 		      enum machine_mode orig_mode, enum machine_mode mode,
9703 		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
9704 {
9705   if (orig_mode == mode)
9706     return mode;
9707   gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
9708 
9709   /* In general we can't widen the mode for a right shift or rotate.  */
9710   switch (code)
9711     {
9712     case ASHIFTRT:
9713       /* We can still widen if the bits brought in from the left are identical
9714 	 to the sign bit of ORIG_MODE.  */
9715       if (num_sign_bit_copies (op, mode)
9716 	  > (unsigned) (GET_MODE_PRECISION (mode)
9717 			- GET_MODE_PRECISION (orig_mode)))
9718 	return mode;
9719       return orig_mode;
9720 
9721     case LSHIFTRT:
9722       /* Similarly here but with zero bits.  */
9723       if (HWI_COMPUTABLE_MODE_P (mode)
9724 	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
9725 	return mode;
9726 
9727       /* We can also widen if the bits brought in will be masked off.  This
9728 	 operation is performed in ORIG_MODE.  */
9729       if (outer_code == AND)
9730 	{
9731 	  int care_bits = low_bitmask_len (orig_mode, outer_const);
9732 
9733 	  if (care_bits >= 0
9734 	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
9735 	    return mode;
9736 	}
9737       /* fall through */
9738 
9739     case ROTATE:
9740       return orig_mode;
9741 
9742     case ROTATERT:
9743       gcc_unreachable ();
9744 
9745     default:
9746       return mode;
9747     }
9748 }
9749 
9750 /* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
9751    of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
9752    if we cannot simplify it.  Otherwise, return a simplified value.
9753 
9754    The shift is normally computed in the widest mode we find in VAROP, as
9755    long as it isn't a different number of words than RESULT_MODE.  Exceptions
9756    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
9757 
9758 static rtx
9759 simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
9760 			rtx varop, int orig_count)
9761 {
9762   enum rtx_code orig_code = code;
9763   rtx orig_varop = varop;
9764   int count;
9765   enum machine_mode mode = result_mode;
9766   enum machine_mode shift_mode, tmode;
9767   unsigned int mode_words
9768     = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
9769   /* We form (outer_op (code varop count) (outer_const)).  */
9770   enum rtx_code outer_op = UNKNOWN;
9771   HOST_WIDE_INT outer_const = 0;
9772   int complement_p = 0;
9773   rtx new_rtx, x;
9774 
9775   /* Make sure to truncate the "natural" shift on the way in.  We don't
9776      want to do this inside the loop as it makes it more difficult to
9777      combine shifts.  */
9778   if (SHIFT_COUNT_TRUNCATED)
9779     orig_count &= GET_MODE_BITSIZE (mode) - 1;
9780 
9781   /* If we were given an invalid count, don't do anything except exactly
9782      what was requested.  */
9783 
9784   if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
9785     return NULL_RTX;
9786 
9787   count = orig_count;
9788 
9789   /* Unless one of the arms of the `switch' in this loop does a `continue',
9790      we will `break' the loop after the `switch'.  */
9791 
9792   while (count != 0)
9793     {
9794       /* If we have an operand of (clobber (const_int 0)), fail.  */
9795       if (GET_CODE (varop) == CLOBBER)
9796 	return NULL_RTX;
9797 
9798       /* Convert ROTATERT to ROTATE.  */
9799       if (code == ROTATERT)
9800 	{
9801 	  unsigned int bitsize = GET_MODE_PRECISION (result_mode);
9802 	  code = ROTATE;
9803 	  if (VECTOR_MODE_P (result_mode))
9804 	    count = bitsize / GET_MODE_NUNITS (result_mode) - count;
9805 	  else
9806 	    count = bitsize - count;
9807 	}
9808 
9809       shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
9810 					 mode, outer_op, outer_const);
9811 
9812       /* Handle cases where the count is greater than the size of the mode
9813 	 minus 1.  For ASHIFTRT, use the size minus one as the count (this can
9814 	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
9815 	 take the count modulo the size.  For other shifts, the result is
9816 	 zero.
9817 
9818 	 Since these shifts are being produced by the compiler by combining
9819 	 multiple operations, each of which are defined, we know what the
9820 	 result is supposed to be.  */
9821 
9822       if (count > (GET_MODE_PRECISION (shift_mode) - 1))
9823 	{
9824 	  if (code == ASHIFTRT)
9825 	    count = GET_MODE_PRECISION (shift_mode) - 1;
9826 	  else if (code == ROTATE || code == ROTATERT)
9827 	    count %= GET_MODE_PRECISION (shift_mode);
9828 	  else
9829 	    {
9830 	      /* We can't simply return zero because there may be an
9831 		 outer op.  */
9832 	      varop = const0_rtx;
9833 	      count = 0;
9834 	      break;
9835 	    }
9836 	}
9837 
9838       /* If we discovered we had to complement VAROP, leave.  Making a NOT
9839 	 here would cause an infinite loop.  */
9840       if (complement_p)
9841 	break;
9842 
9843       /* An arithmetic right shift of a quantity known to be -1 or 0
9844 	 is a no-op.  */
9845       if (code == ASHIFTRT
9846 	  && (num_sign_bit_copies (varop, shift_mode)
9847 	      == GET_MODE_PRECISION (shift_mode)))
9848 	{
9849 	  count = 0;
9850 	  break;
9851 	}
9852 
9853       /* If we are doing an arithmetic right shift and discarding all but
9854 	 the sign bit copies, this is equivalent to doing a shift by the
9855 	 bitsize minus one.  Convert it into that shift because it will often
9856 	 allow other simplifications.  */
9857 
9858       if (code == ASHIFTRT
9859 	  && (count + num_sign_bit_copies (varop, shift_mode)
9860 	      >= GET_MODE_PRECISION (shift_mode)))
9861 	count = GET_MODE_PRECISION (shift_mode) - 1;
9862 
9863       /* We simplify the tests below and elsewhere by converting
9864 	 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
9865 	 `make_compound_operation' will convert it to an ASHIFTRT for
9866 	 those machines (such as VAX) that don't have an LSHIFTRT.  */
9867       if (code == ASHIFTRT
9868 	  && val_signbit_known_clear_p (shift_mode,
9869 					nonzero_bits (varop, shift_mode)))
9870 	code = LSHIFTRT;
9871 
9872       if (((code == LSHIFTRT
9873 	    && HWI_COMPUTABLE_MODE_P (shift_mode)
9874 	    && !(nonzero_bits (varop, shift_mode) >> count))
9875 	   || (code == ASHIFT
9876 	       && HWI_COMPUTABLE_MODE_P (shift_mode)
9877 	       && !((nonzero_bits (varop, shift_mode) << count)
9878 		    & GET_MODE_MASK (shift_mode))))
9879 	  && !side_effects_p (varop))
9880 	varop = const0_rtx;
9881 
9882       switch (GET_CODE (varop))
9883 	{
9884 	case SIGN_EXTEND:
9885 	case ZERO_EXTEND:
9886 	case SIGN_EXTRACT:
9887 	case ZERO_EXTRACT:
9888 	  new_rtx = expand_compound_operation (varop);
9889 	  if (new_rtx != varop)
9890 	    {
9891 	      varop = new_rtx;
9892 	      continue;
9893 	    }
9894 	  break;
9895 
9896 	case MEM:
9897 	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
9898 	     minus the width of a smaller mode, we can do this with a
9899 	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
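	  /* E.g. (lshiftrt:SI (mem:SI A) (const_int 24)) can become
	     (zero_extend:SI (mem:QI A')), where A' addresses the high-order
	     byte (offset 3 on a little-endian target).  */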
9900 	  if ((code == ASHIFTRT || code == LSHIFTRT)
9901 	      && ! mode_dependent_address_p (XEXP (varop, 0),
9902 					     MEM_ADDR_SPACE (varop))
9903 	      && ! MEM_VOLATILE_P (varop)
9904 	      && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
9905 					 MODE_INT, 1)) != BLKmode)
9906 	    {
9907 	      new_rtx = adjust_address_nv (varop, tmode,
9908 				       BYTES_BIG_ENDIAN ? 0
9909 				       : count / BITS_PER_UNIT);
9910 
9911 	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
9912 				     : ZERO_EXTEND, mode, new_rtx);
9913 	      count = 0;
9914 	      continue;
9915 	    }
9916 	  break;
9917 
9918 	case SUBREG:
9919 	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
9920 	     the same number of words as what we've seen so far.  Then store
9921 	     the widest mode in MODE.  */
9922 	  if (subreg_lowpart_p (varop)
9923 	      && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9924 		  > GET_MODE_SIZE (GET_MODE (varop)))
9925 	      && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
9926 				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
9927 		 == mode_words
9928 	      && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
9929 	      && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
9930 	    {
9931 	      varop = SUBREG_REG (varop);
9932 	      if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
9933 		mode = GET_MODE (varop);
9934 	      continue;
9935 	    }
9936 	  break;
9937 
9938 	case MULT:
9939 	  /* Some machines use MULT instead of ASHIFT because MULT
9940 	     is cheaper.  But it is still better on those machines to
9941 	     merge two shifts into one.  */
9942 	  if (CONST_INT_P (XEXP (varop, 1))
9943 	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9944 	    {
9945 	      varop
9946 		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
9947 				       XEXP (varop, 0),
9948 				       GEN_INT (exact_log2 (
9949 						UINTVAL (XEXP (varop, 1)))));
9950 	      continue;
9951 	    }
9952 	  break;
9953 
9954 	case UDIV:
9955 	  /* Similar, for when divides are cheaper.  */
9956 	  if (CONST_INT_P (XEXP (varop, 1))
9957 	      && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
9958 	    {
9959 	      varop
9960 		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
9961 				       XEXP (varop, 0),
9962 				       GEN_INT (exact_log2 (
9963 						UINTVAL (XEXP (varop, 1)))));
9964 	      continue;
9965 	    }
9966 	  break;
9967 
9968 	case ASHIFTRT:
9969 	  /* If we are extracting just the sign bit of an arithmetic
9970 	     right shift, that shift is not needed.  However, the sign
9971 	     bit of a wider mode may be different from what would be
9972 	     interpreted as the sign bit in a narrower mode, so, if
9973 	     the result is narrower, don't discard the shift.  */
9974 	  if (code == LSHIFTRT
9975 	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
9976 	      && (GET_MODE_BITSIZE (result_mode)
9977 		  >= GET_MODE_BITSIZE (GET_MODE (varop))))
9978 	    {
9979 	      varop = XEXP (varop, 0);
9980 	      continue;
9981 	    }
9982 
9983 	  /* ... fall through ...  */
9984 
9985 	case LSHIFTRT:
9986 	case ASHIFT:
9987 	case ROTATE:
9988 	  /* Here we have two nested shifts.  The result is usually the
9989 	     AND of a new shift with a mask.  We compute the result below.  */
9990 	  if (CONST_INT_P (XEXP (varop, 1))
9991 	      && INTVAL (XEXP (varop, 1)) >= 0
9992 	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
9993 	      && HWI_COMPUTABLE_MODE_P (result_mode)
9994 	      && HWI_COMPUTABLE_MODE_P (mode)
9995 	      && !VECTOR_MODE_P (result_mode))
9996 	    {
9997 	      enum rtx_code first_code = GET_CODE (varop);
9998 	      unsigned int first_count = INTVAL (XEXP (varop, 1));
9999 	      unsigned HOST_WIDE_INT mask;
10000 	      rtx mask_rtx;
10001 
10002 	      /* We have one common special case.  We can't do any merging if
10003 		 the inner code is an ASHIFTRT of a smaller mode.  However, if
10004 		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10005 		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10006 		 we can convert it to
10007 		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10008 		 This simplifies certain SIGN_EXTEND operations.  */
10009 	      if (code == ASHIFT && first_code == ASHIFTRT
10010 		  && count == (GET_MODE_PRECISION (result_mode)
10011 			       - GET_MODE_PRECISION (GET_MODE (varop))))
10012 		{
10013 		  /* C3 has the low-order C1 bits zero.  */
10014 
10015 		  mask = GET_MODE_MASK (mode)
10016 			 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
10017 
10018 		  varop = simplify_and_const_int (NULL_RTX, result_mode,
10019 						  XEXP (varop, 0), mask);
10020 		  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10021 						varop, count);
10022 		  count = first_count;
10023 		  code = ASHIFTRT;
10024 		  continue;
10025 		}
10026 
10027 	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10028 		 than C1 high-order bits equal to the sign bit, we can convert
10029 		 this to either an ASHIFT or an ASHIFTRT depending on the
10030 		 two counts.
10031 
10032 		 We cannot do this if VAROP's mode is not SHIFT_MODE.  */
10033 
10034 	      if (code == ASHIFTRT && first_code == ASHIFT
10035 		  && GET_MODE (varop) == shift_mode
10036 		  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10037 		      > first_count))
10038 		{
10039 		  varop = XEXP (varop, 0);
10040 		  count -= first_count;
10041 		  if (count < 0)
10042 		    {
10043 		      count = -count;
10044 		      code = ASHIFT;
10045 		    }
10046 
10047 		  continue;
10048 		}
10049 
10050 	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
10051 		 we can only do this if FIRST_CODE is also ASHIFTRT.
10052 
10053 		 We can't do the case when CODE is ROTATE and FIRST_CODE is
10054 		 ASHIFTRT.
10055 
10056 		 If the mode of this shift is not the mode of the outer shift,
10057 		 we can't do this if either shift is a right shift or ROTATE.
10058 
10059 		 Finally, we can't do any of these if the mode is too wide
10060 		 unless the codes are the same.
10061 
10062 		 Handle the case where the shift codes are the same
10063 		 first.  */
10064 
10065 	      if (code == first_code)
10066 		{
10067 		  if (GET_MODE (varop) != result_mode
10068 		      && (code == ASHIFTRT || code == LSHIFTRT
10069 			  || code == ROTATE))
10070 		    break;
10071 
10072 		  count += first_count;
10073 		  varop = XEXP (varop, 0);
10074 		  continue;
10075 		}
10076 
10077 	      if (code == ASHIFTRT
10078 		  || (code == ROTATE && first_code == ASHIFTRT)
10079 		  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10080 		  || (GET_MODE (varop) != result_mode
10081 		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
10082 			  || first_code == ROTATE
10083 			  || code == ROTATE)))
10084 		break;
10085 
10086 	      /* To compute the mask to apply after the shift, shift the
10087 		 nonzero bits of the inner shift the same way the
10088 		 outer shift will.  */
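	      /* E.g. for (lshiftrt:SI (lshiftrt:SI X 4) 8), the inner
		 nonzero bits (at most 0x0fffffff) shifted right by 8 give
		 the mask 0x000fffff to apply to the merged (lshiftrt X 12).  */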
10089 
10090 	      mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));
10091 
10092 	      mask_rtx
10093 		= simplify_const_binary_operation (code, result_mode, mask_rtx,
10094 						   GEN_INT (count));
10095 
10096 	      /* Give up if we can't compute an outer operation to use.  */
10097 	      if (mask_rtx == 0
10098 		  || !CONST_INT_P (mask_rtx)
10099 		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
10100 					INTVAL (mask_rtx),
10101 					result_mode, &complement_p))
10102 		break;
10103 
10104 	      /* If the shifts are in the same direction, we add the
10105 		 counts.  Otherwise, we subtract them.  */
10106 	      if ((code == ASHIFTRT || code == LSHIFTRT)
10107 		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10108 		count += first_count;
10109 	      else
10110 		count -= first_count;
10111 
10112 	      /* If COUNT is positive, the new shift is CODE, except in
10113 		 the two cases below, in which case it is FIRST_CODE.  If
10114 		 the count is negative, FIRST_CODE should always be
10115 		 used.  */
10116 	      if (count > 0
10117 		  && ((first_code == ROTATE && code == ASHIFT)
10118 		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
10119 		code = first_code;
10120 	      else if (count < 0)
10121 		code = first_code, count = -count;
10122 
10123 	      varop = XEXP (varop, 0);
10124 	      continue;
10125 	    }
10126 
10127 	  /* If we have (A << B << C) for any shift, we can convert this to
10128 	     (A << C << B).  This wins if A is a constant.  Only try this if
10129 	     B is not a constant.  */
10130 
10131 	  else if (GET_CODE (varop) == code
10132 		   && CONST_INT_P (XEXP (varop, 0))
10133 		   && !CONST_INT_P (XEXP (varop, 1)))
10134 	    {
10135 	      rtx new_rtx = simplify_const_binary_operation (code, mode,
10136 							 XEXP (varop, 0),
10137 							 GEN_INT (count));
10138 	      varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10139 	      count = 0;
10140 	      continue;
10141 	    }
10142 	  break;
10143 
10144 	case NOT:
10145 	  if (VECTOR_MODE_P (mode))
10146 	    break;
10147 
10148 	  /* Make this fit the case below.  */
10149 	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10150 	  continue;
10151 
10152 	case IOR:
10153 	case AND:
10154 	case XOR:
10155 	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10156 	     with C the size of VAROP - 1 and the shift is logical if
10157 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10158 	     we have an (le X 0) operation.  If we have an arithmetic shift
10159 	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
10160 	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */
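	  /* (The sign bit of (ior (plus X -1) X) is set exactly when
	     X <= 0, and the shift by the mode width minus 1 extracts
	     that bit.)  */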
10161 
10162 	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10163 	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10164 	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10165 	      && (code == LSHIFTRT || code == ASHIFTRT)
10166 	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10167 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10168 	    {
10169 	      count = 0;
10170 	      varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10171 				  const0_rtx);
10172 
10173 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10174 		varop = gen_rtx_NEG (GET_MODE (varop), varop);
10175 
10176 	      continue;
10177 	    }
10178 
10179 	  /* If we have (shift (logical)), move the logical to the outside
10180 	     to allow it to possibly combine with another logical and the
10181 	     shift to combine with another shift.  This also canonicalizes to
10182 	     what a ZERO_EXTRACT looks like.  Also, some machines have
10183 	     (and (shift)) insns.  */
10184 
10185 	  if (CONST_INT_P (XEXP (varop, 1))
10186 	      /* We can't do this if we have (ashiftrt (xor))  and the
10187 		 constant has its sign bit set in shift_mode.  */
10188 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10189 		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10190 					      shift_mode))
10191 	      && (new_rtx = simplify_const_binary_operation (code, result_mode,
10192 							 XEXP (varop, 1),
10193 							 GEN_INT (count))) != 0
10194 	      && CONST_INT_P (new_rtx)
10195 	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10196 				  INTVAL (new_rtx), result_mode, &complement_p))
10197 	    {
10198 	      varop = XEXP (varop, 0);
10199 	      continue;
10200 	    }
10201 
10202 	  /* If we can't do that, try to simplify the shift in each arm of the
10203 	     logical expression, make a new logical expression, and apply
10204 	     the inverse distributive law.  This also can't be done
10205 	     for some (ashiftrt (xor)).  */
10206 	  if (CONST_INT_P (XEXP (varop, 1))
10207 	     && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10208 		  && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10209 					     shift_mode)))
10210 	    {
10211 	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10212 					      XEXP (varop, 0), count);
10213 	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10214 					      XEXP (varop, 1), count);
10215 
10216 	      varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10217 					   lhs, rhs);
10218 	      varop = apply_distributive_law (varop);
10219 
10220 	      count = 0;
10221 	      continue;
10222 	    }
10223 	  break;
10224 
10225 	case EQ:
10226 	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10227 	     says that the sign bit can be tested, FOO has mode MODE, C is
10228 	     GET_MODE_PRECISION (MODE) - 1, and only the low-order bit of FOO
10229 	     may be nonzero.  */
10230 	  if (code == LSHIFTRT
10231 	      && XEXP (varop, 1) == const0_rtx
10232 	      && GET_MODE (XEXP (varop, 0)) == result_mode
10233 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10234 	      && HWI_COMPUTABLE_MODE_P (result_mode)
10235 	      && STORE_FLAG_VALUE == -1
10236 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10237 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10238 				  &complement_p))
10239 	    {
10240 	      varop = XEXP (varop, 0);
10241 	      count = 0;
10242 	      continue;
10243 	    }
10244 	  break;
10245 
10246 	case NEG:
10247 	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10248 	     than the number of bits in the mode is equivalent to A.  */
10249 	  if (code == LSHIFTRT
10250 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10251 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10252 	    {
10253 	      varop = XEXP (varop, 0);
10254 	      count = 0;
10255 	      continue;
10256 	    }
10257 
10258 	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
10259 	     NEG outside to allow shifts to combine.  */
10260 	  if (code == ASHIFT
10261 	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10262 				  &complement_p))
10263 	    {
10264 	      varop = XEXP (varop, 0);
10265 	      continue;
10266 	    }
10267 	  break;
10268 
10269 	case PLUS:
10270 	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10271 	     is one less than the number of bits in the mode is
10272 	     equivalent to (xor A 1).  */
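	  /* (A is 0 or 1, so (plus A -1) is -1 or 0; its high bit,
	     extracted by the shift, is 1 exactly when A was 0.)  */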
10273 	  if (code == LSHIFTRT
10274 	      && count == (GET_MODE_PRECISION (result_mode) - 1)
10275 	      && XEXP (varop, 1) == constm1_rtx
10276 	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10277 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10278 				  &complement_p))
10279 	    {
10280 	      count = 0;
10281 	      varop = XEXP (varop, 0);
10282 	      continue;
10283 	    }
10284 
10285 	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10286 	     that might be nonzero in BAR are those being shifted out and those
10287 	     bits are known zero in FOO, we can replace the PLUS with FOO.
10288 	     Similarly in the other operand order.  This code occurs when
10289 	     we are computing the size of a variable-size array.  */
10290 
10291 	  if ((code == ASHIFTRT || code == LSHIFTRT)
10292 	      && count < HOST_BITS_PER_WIDE_INT
10293 	      && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10294 	      && (nonzero_bits (XEXP (varop, 1), result_mode)
10295 		  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10296 	    {
10297 	      varop = XEXP (varop, 0);
10298 	      continue;
10299 	    }
10300 	  else if ((code == ASHIFTRT || code == LSHIFTRT)
10301 		   && count < HOST_BITS_PER_WIDE_INT
10302 		   && HWI_COMPUTABLE_MODE_P (result_mode)
10303 		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10304 			    >> count)
10305 		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10306 			    & nonzero_bits (XEXP (varop, 1),
10307 						 result_mode)))
10308 	    {
10309 	      varop = XEXP (varop, 1);
10310 	      continue;
10311 	    }
10312 
10313 	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
10314 	  if (code == ASHIFT
10315 	      && CONST_INT_P (XEXP (varop, 1))
10316 	      && (new_rtx = simplify_const_binary_operation (ASHIFT, result_mode,
10317 							 XEXP (varop, 1),
10318 							 GEN_INT (count))) != 0
10319 	      && CONST_INT_P (new_rtx)
10320 	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
10321 				  INTVAL (new_rtx), result_mode, &complement_p))
10322 	    {
10323 	      varop = XEXP (varop, 0);
10324 	      continue;
10325 	    }
10326 
10327 	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10328 	     signbit', and attempt to change the PLUS to an XOR and move it
10329 	     to the outer operation, as is done above in the AND/IOR/XOR
10330 	     case for logical shifts.  See that handling for the reasoning
10331 	     behind doing so.  */
10332 	  if (code == LSHIFTRT
10333 	      && CONST_INT_P (XEXP (varop, 1))
10334 	      && mode_signbit_p (result_mode, XEXP (varop, 1))
10335 	      && (new_rtx = simplify_const_binary_operation (code, result_mode,
10336 							 XEXP (varop, 1),
10337 							 GEN_INT (count))) != 0
10338 	      && CONST_INT_P (new_rtx)
10339 	      && merge_outer_ops (&outer_op, &outer_const, XOR,
10340 				  INTVAL (new_rtx), result_mode, &complement_p))
10341 	    {
10342 	      varop = XEXP (varop, 0);
10343 	      continue;
10344 	    }
10345 
10346 	  break;
10347 
10348 	case MINUS:
10349 	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
10350 	     with C the size of VAROP - 1 and the shift is logical if
10351 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10352 	     we have a (gt X 0) operation.  If the shift is arithmetic with
10353 	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10354 	     we have a (neg (gt X 0)) operation.  */
10355 
10356 	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10357 	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10358 	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10359 	      && (code == LSHIFTRT || code == ASHIFTRT)
10360 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10361 	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10362 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10363 	    {
10364 	      count = 0;
10365 	      varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10366 				  const0_rtx);
10367 
10368 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10369 		varop = gen_rtx_NEG (GET_MODE (varop), varop);
10370 
10371 	      continue;
10372 	    }
10373 	  break;
10374 
10375 	case TRUNCATE:
10376 	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10377 	     if the truncate does not affect the value.  */
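	  /* E.g. (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3)
	     becomes (truncate:SI (lshiftrt:DI X 35)).  */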
10378 	  if (code == LSHIFTRT
10379 	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10380 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10381 	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
10382 		  >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
10383 		      - GET_MODE_PRECISION (GET_MODE (varop)))))
10384 	    {
10385 	      rtx varop_inner = XEXP (varop, 0);
10386 
10387 	      varop_inner
10388 		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10389 				    XEXP (varop_inner, 0),
10390 				    GEN_INT
10391 				    (count + INTVAL (XEXP (varop_inner, 1))));
10392 	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10393 	      count = 0;
10394 	      continue;
10395 	    }
10396 	  break;
10397 
10398 	default:
10399 	  break;
10400 	}
10401 
10402       break;
10403     }
10404 
10405   shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10406 				     outer_op, outer_const);
10407 
10408   /* We have now finished analyzing the shift.  The result should be
10409      a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
10410      OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10411      to the result of the shift.  OUTER_CONST is the relevant constant,
10412      but we must turn off all bits turned off in the shift.  */
10413 
10414   if (outer_op == UNKNOWN
10415       && orig_code == code && orig_count == count
10416       && varop == orig_varop
10417       && shift_mode == GET_MODE (varop))
10418     return NULL_RTX;
10419 
10420   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
10421   varop = gen_lowpart (shift_mode, varop);
10422   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10423     return NULL_RTX;
10424 
10425   /* If we have an outer operation and we just made a shift, it is
10426      possible that we could have simplified the shift were it not
10427      for the outer operation.  So try to do the simplification
10428      recursively.  */
10429 
10430   if (outer_op != UNKNOWN)
10431     x = simplify_shift_const_1 (code, shift_mode, varop, count);
10432   else
10433     x = NULL_RTX;
10434 
10435   if (x == NULL_RTX)
10436     x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10437 
10438   /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10439      turn off all the bits that the shift would have turned off.  */
10440   if (orig_code == LSHIFTRT && result_mode != shift_mode)
10441     x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10442 				GET_MODE_MASK (result_mode) >> orig_count);
10443 
10444   /* Do the remainder of the processing in RESULT_MODE.  */
10445   x = gen_lowpart_or_truncate (result_mode, x);
10446 
10447   /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10448      operation.  */
10449   if (complement_p)
10450     x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10451 
10452   if (outer_op != UNKNOWN)
10453     {
10454       if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10455 	  && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
10456 	outer_const = trunc_int_for_mode (outer_const, result_mode);
10457 
10458       if (outer_op == AND)
10459 	x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10460       else if (outer_op == SET)
10461 	{
10462 	  /* This means that we have determined that the result is
10463 	     equivalent to a constant.  This should be rare.  */
10464 	  if (!side_effects_p (x))
10465 	    x = GEN_INT (outer_const);
10466 	}
10467       else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10468 	x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10469       else
10470 	x = simplify_gen_binary (outer_op, result_mode, x,
10471 				 GEN_INT (outer_const));
10472     }
10473 
10474   return x;
10475 }
10476 
10477 /* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
10478    The result of the shift is RESULT_MODE.  If we cannot simplify it,
10479    return X or, if it is NULL, synthesize the expression with
10480    simplify_gen_binary.  Otherwise, return a simplified value.
10481 
10482    The shift is normally computed in the widest mode we find in VAROP, as
10483    long as it isn't a different number of words than RESULT_MODE.  Exceptions
10484    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
10485 
10486 static rtx
10487 simplify_shift_const (rtx x, enum rtx_code code, enum machine_mode result_mode,
10488 		      rtx varop, int count)
10489 {
10490   rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10491   if (tem)
10492     return tem;
10493 
10494   if (!x)
10495     x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10496   if (GET_MODE (x) != result_mode)
10497     x = gen_lowpart (result_mode, x);
10498   return x;
10499 }
10500 
10501 
10502 /* Like recog, but we receive the address of a pointer to a new pattern.
10503    We try to match the rtx that the pointer points to.
10504    If that fails, we may try to modify or replace the pattern,
10505    storing the replacement into the same pointer object.
10506 
10507    Modifications include deletion or addition of CLOBBERs.
10508 
10509    PNOTES is a pointer to a location where any REG_UNUSED notes added for
10510    the CLOBBERs are placed.
10511 
10512    The value is the final insn code from the pattern ultimately matched,
10513    or -1.  */
10514 
10515 static int
10516 recog_for_combine (rtx *pnewpat, rtx insn, rtx *pnotes)
10517 {
10518   rtx pat = *pnewpat;
10519   rtx pat_without_clobbers;
10520   int insn_code_number;
10521   int num_clobbers_to_add = 0;
10522   int i;
10523   rtx notes = NULL_RTX;
10524   rtx old_notes, old_pat;
10525   int old_icode;
10526 
10527   /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10528      we use to indicate that something didn't match.  If we find such a
10529      thing, force rejection.  */
10530   if (GET_CODE (pat) == PARALLEL)
10531     for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10532       if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10533 	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10534 	return -1;
10535 
10536   old_pat = PATTERN (insn);
10537   old_notes = REG_NOTES (insn);
10538   PATTERN (insn) = pat;
10539   REG_NOTES (insn) = NULL_RTX;
10540 
10541   insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10542   if (dump_file && (dump_flags & TDF_DETAILS))
10543     {
10544       if (insn_code_number < 0)
10545 	fputs ("Failed to match this instruction:\n", dump_file);
10546       else
10547 	fputs ("Successfully matched this instruction:\n", dump_file);
10548       print_rtl_single (dump_file, pat);
10549     }
10550 
10551   /* If the pattern wasn't recognized, we may previously have had an insn
10552      that clobbered some register as a side effect, but the combined
10553      insn doesn't need to do that.  So try once more without the clobbers,
10554      unless this represents an ASM insn.  */
10555 
10556   if (insn_code_number < 0 && ! check_asm_operands (pat)
10557       && GET_CODE (pat) == PARALLEL)
10558     {
10559       int pos;
10560 
10561       for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10562 	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10563 	  {
10564 	    if (i != pos)
10565 	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10566 	    pos++;
10567 	  }
10568 
10569       SUBST_INT (XVECLEN (pat, 0), pos);
10570 
10571       if (pos == 1)
10572 	pat = XVECEXP (pat, 0, 0);
10573 
10574       PATTERN (insn) = pat;
10575       insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10576       if (dump_file && (dump_flags & TDF_DETAILS))
10577 	{
10578 	  if (insn_code_number < 0)
10579 	    fputs ("Failed to match this instruction:\n", dump_file);
10580 	  else
10581 	    fputs ("Successfully matched this instruction:\n", dump_file);
10582 	  print_rtl_single (dump_file, pat);
10583 	}
10584     }
10585 
10586   pat_without_clobbers = pat;
10587 
10588   PATTERN (insn) = old_pat;
10589   REG_NOTES (insn) = old_notes;
10590 
10591   /* Recognize all noop sets; these will be removed by a follow-up pass.  */
10592   if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10593     insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10594 
10595   /* If we had any clobbers to add, make a new pattern that contains
10596      them.  Then check to make sure that all of them are dead.  */
10597   if (num_clobbers_to_add)
10598     {
10599       rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10600 				     rtvec_alloc (GET_CODE (pat) == PARALLEL
10601 						  ? (XVECLEN (pat, 0)
10602 						     + num_clobbers_to_add)
10603 						  : num_clobbers_to_add + 1));
10604 
10605       if (GET_CODE (pat) == PARALLEL)
10606 	for (i = 0; i < XVECLEN (pat, 0); i++)
10607 	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10608       else
10609 	XVECEXP (newpat, 0, 0) = pat;
10610 
10611       add_clobbers (newpat, insn_code_number);
10612 
10613       for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10614 	   i < XVECLEN (newpat, 0); i++)
10615 	{
10616 	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10617 	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10618 	    return -1;
10619 	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10620 	    {
10621 	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10622 	      notes = alloc_reg_note (REG_UNUSED,
10623 				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
10624 	    }
10625 	}
10626       pat = newpat;
10627     }
10628 
10629   if (insn_code_number >= 0
10630       && insn_code_number != NOOP_MOVE_INSN_CODE)
10631     {
10632       old_pat = PATTERN (insn);
10633       old_notes = REG_NOTES (insn);
10634       old_icode = INSN_CODE (insn);
10635       PATTERN (insn) = pat;
10636       REG_NOTES (insn) = notes;
10637 
10638       /* Allow targets to reject combined insn.  */
10639       if (!targetm.legitimate_combined_insn (insn))
10640 	{
10641 	  if (dump_file && (dump_flags & TDF_DETAILS))
10642 	    fputs ("Instruction not appropriate for target.",
10643 		   dump_file);
10644 
10645 	  /* Callers expect recog_for_combine to strip
10646 	     clobbers from the pattern on failure.  */
10647 	  pat = pat_without_clobbers;
10648 	  notes = NULL_RTX;
10649 
10650 	  insn_code_number = -1;
10651 	}
10652 
10653       PATTERN (insn) = old_pat;
10654       REG_NOTES (insn) = old_notes;
10655       INSN_CODE (insn) = old_icode;
10656     }
10657 
10658   *pnewpat = pat;
10659   *pnotes = notes;
10660 
10661   return insn_code_number;
10662 }
10663 
10664 /* Like gen_lowpart_general but for use by combine.  In combine it
10665    is not possible to create any new pseudoregs.  However, it is
10666    safe to create invalid memory addresses, because combine will
10667    try to recognize them and all they will do is make the combine
10668    attempt fail.
10669 
10670    If for some reason this cannot do its job, an rtx
10671    (clobber (const_int 0)) is returned.
10672    An insn containing that will not be recognized.  */
10673 
10674 static rtx
10675 gen_lowpart_for_combine (enum machine_mode omode, rtx x)
10676 {
10677   enum machine_mode imode = GET_MODE (x);
10678   unsigned int osize = GET_MODE_SIZE (omode);
10679   unsigned int isize = GET_MODE_SIZE (imode);
10680   rtx result;
10681 
10682   if (omode == imode)
10683     return x;
10684 
10685   /* We can only support MODE being wider than a word if X is a
10686      constant integer or has a mode the same size.  */
10687   if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
10688       && ! (CONST_SCALAR_INT_P (x) || isize == osize))
10689     goto fail;
10690 
10691   /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
10692      won't know what to do.  So we will strip off the SUBREG here and
10693      process normally.  */
10694   if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
10695     {
10696       x = SUBREG_REG (x);
10697 
10698       /* In case we fall through to the address adjustments further
10699 	 below, update the known mode and size of X (imode and isize),
10700 	 since we just changed X.  */
10701       imode = GET_MODE (x);
10702 
10703       if (imode == omode)
10704 	return x;
10705 
10706       isize = GET_MODE_SIZE (imode);
10707     }
10708 
10709   result = gen_lowpart_common (omode, x);
10710 
10711   if (result)
10712     return result;
10713 
10714   if (MEM_P (x))
10715     {
10716       int offset = 0;
10717 
10718       /* Refuse to work on a volatile memory ref or one with a mode-dependent
10719 	 address.  */
10720       if (MEM_VOLATILE_P (x)
10721 	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
10722 	goto fail;
10723 
10724       /* If we want to refer to something bigger than the original memref,
10725 	 generate a paradoxical subreg instead.  That will force a reload
10726 	 of the original memref X.  */
10727       if (isize < osize)
10728 	return gen_rtx_SUBREG (omode, x, 0);
10729 
10730       if (WORDS_BIG_ENDIAN)
10731 	offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
10732 
10733       /* Adjust the address so that the address-after-the-data is
10734 	 unchanged.  */
10735       if (BYTES_BIG_ENDIAN)
10736 	offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
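      /* Illustration, assuming a big-endian target with
	 UNITS_PER_WORD == 4: taking the SImode lowpart of a DImode MEM
	 gives offset MAX (8, 4) - MAX (4, 4) == 4 from the word
	 adjustment and no byte adjustment, selecting the low-order
	 word.  */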
10737 
10738       return adjust_address_nv (x, omode, offset);
10739     }
10740 
10741   /* If X is a comparison operator, rewrite it in a new mode.  This
10742      probably won't match, but may allow further simplifications.  */
10743   else if (COMPARISON_P (x))
10744     return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
10745 
10746   /* If we couldn't simplify X any other way, just enclose it in a
10747      SUBREG.  Normally, this SUBREG won't match, but some patterns may
10748      include an explicit SUBREG or we may simplify it further in combine.  */
10749   else
10750     {
10751       int offset = 0;
10752       rtx res;
10753 
10754       offset = subreg_lowpart_offset (omode, imode);
10755       if (imode == VOIDmode)
10756 	{
10757 	  imode = int_mode_for_mode (omode);
10758 	  x = gen_lowpart_common (imode, x);
10759 	  if (x == NULL)
10760 	    goto fail;
10761 	}
10762       res = simplify_gen_subreg (omode, x, imode, offset);
10763       if (res)
10764 	return res;
10765     }
10766 
10767  fail:
10768   return gen_rtx_CLOBBER (omode, const0_rtx);
10769 }
10770 
10771 /* Try to simplify a comparison between OP0 and a constant OP1,
10772    where CODE is the comparison code that will be tested, into a
10773    (CODE OP0 const0_rtx) form.
10774 
10775    The result is a possibly different comparison code to use.
10776    *POP1 may be updated.  */
10777 
10778 static enum rtx_code
10779 simplify_compare_const (enum rtx_code code, rtx op0, rtx *pop1)
10780 {
10781   enum machine_mode mode = GET_MODE (op0);
10782   unsigned int mode_width = GET_MODE_PRECISION (mode);
10783   HOST_WIDE_INT const_op = INTVAL (*pop1);
10784 
10785   /* Get the constant we are comparing against and turn off all bits
10786      not on in our mode.  */
10787   if (mode != VOIDmode)
10788     const_op = trunc_int_for_mode (const_op, mode);
10789 
10790   /* If we are comparing against a constant power of two and the value
10791      being compared can only have that single bit nonzero (e.g., it was
10792      `and'ed with that bit), we can replace this with a comparison
10793      with zero.  */
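  /* For example, if OP0 is (and X 8) its only possible nonzero bit is
     bit 3, so (eq (and X 8) 8) becomes (ne (and X 8) 0).  */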
10794   if (const_op
10795       && (code == EQ || code == NE || code == GE || code == GEU
10796 	  || code == LT || code == LTU)
10797       && mode_width <= HOST_BITS_PER_WIDE_INT
10798       && exact_log2 (const_op) >= 0
10799       && nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) const_op)
10800     {
10801       code = (code == EQ || code == GE || code == GEU ? NE : EQ);
10802       const_op = 0;
10803     }
10804 
10805   /* Similarly, if we are comparing a value known to be either -1 or
10806      0 with -1, change it to the opposite comparison against zero.  */
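  /* E.g., (eq (ashiftrt:SI X 31) -1) becomes (ne (ashiftrt:SI X 31) 0);
     both hold exactly when X is negative.  */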
10807   if (const_op == -1
10808       && (code == EQ || code == NE || code == GT || code == LE
10809 	  || code == GEU || code == LTU)
10810       && num_sign_bit_copies (op0, mode) == mode_width)
10811     {
10812       code = (code == EQ || code == LE || code == GEU ? NE : EQ);
10813       const_op = 0;
10814     }
10815 
10816   /* Do some canonicalizations based on the comparison code.  We prefer
10817      comparisons against zero and then prefer equality comparisons.
10818      If we can reduce the size of a constant, we will do that too.  */
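  /* For instance, (lt X 5) becomes (le X 4) below, and (leu X 0)
     becomes (eq X 0).  */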
10819   switch (code)
10820     {
10821     case LT:
10822       /* < C is equivalent to <= (C - 1) */
10823       if (const_op > 0)
10824 	{
10825 	  const_op -= 1;
10826 	  code = LE;
10827 	  /* ... fall through to LE case below.  */
10828 	}
10829       else
10830 	break;
10831 
10832     case LE:
10833       /* <= C is equivalent to < (C + 1); we do this for C < 0  */
10834       if (const_op < 0)
10835 	{
10836 	  const_op += 1;
10837 	  code = LT;
10838 	}
10839 
10840       /* If we are doing a <= 0 comparison on a value known to have
10841 	 a zero sign bit, we can replace this with == 0.  */
10842       else if (const_op == 0
10843 	       && mode_width <= HOST_BITS_PER_WIDE_INT
10844 	       && (nonzero_bits (op0, mode)
10845 		   & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10846 	       == 0)
10847 	code = EQ;
10848       break;
10849 
10850     case GE:
10851       /* >= C is equivalent to > (C - 1).  */
10852       if (const_op > 0)
10853 	{
10854 	  const_op -= 1;
10855 	  code = GT;
10856 	  /* ... fall through to GT below.  */
10857 	}
10858       else
10859 	break;
10860 
10861     case GT:
10862       /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
10863       if (const_op < 0)
10864 	{
10865 	  const_op += 1;
10866 	  code = GE;
10867 	}
10868 
10869       /* If we are doing a > 0 comparison on a value known to have
10870 	 a zero sign bit, we can replace this with != 0.  */
10871       else if (const_op == 0
10872 	       && mode_width <= HOST_BITS_PER_WIDE_INT
10873 	       && (nonzero_bits (op0, mode)
10874 		   & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
10875 	       == 0)
10876 	code = NE;
10877       break;
10878 
10879     case LTU:
10880       /* < C is equivalent to <= (C - 1).  */
10881       if (const_op > 0)
10882 	{
10883 	  const_op -= 1;
10884 	  code = LEU;
10885 	  /* ... fall through ...  */
10886 	}
10887       /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
10888       else if (mode_width <= HOST_BITS_PER_WIDE_INT
10889 	       && (unsigned HOST_WIDE_INT) const_op
10890 	       == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
10891 	{
10892 	  const_op = 0;
10893 	  code = GE;
10894 	  break;
10895 	}
10896       else
10897 	break;
10898 
10899     case LEU:
10900       /* unsigned <= 0 is equivalent to == 0 */
10901       if (const_op == 0)
10902 	code = EQ;
10903       /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
10904       else if (mode_width <= HOST_BITS_PER_WIDE_INT
10905 	       && (unsigned HOST_WIDE_INT) const_op
10906 	       == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
10907 	{
10908 	  const_op = 0;
10909 	  code = GE;
10910 	}
10911       break;
10912 
10913     case GEU:
10914       /* >= C is equivalent to > (C - 1).  */
10915       if (const_op > 1)
10916 	{
10917 	  const_op -= 1;
10918 	  code = GTU;
10919 	  /* ... fall through ...  */
10920 	}
10921 
10922       /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
10923       else if (mode_width <= HOST_BITS_PER_WIDE_INT
10924 	       && (unsigned HOST_WIDE_INT) const_op
10925 	       == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
10926 	{
10927 	  const_op = 0;
10928 	  code = LT;
10929 	  break;
10930 	}
10931       else
10932 	break;
10933 
10934     case GTU:
10935       /* unsigned > 0 is equivalent to != 0 */
10936       if (const_op == 0)
10937 	code = NE;
10938       /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
10939       else if (mode_width <= HOST_BITS_PER_WIDE_INT
10940 	       && (unsigned HOST_WIDE_INT) const_op
10941 	       == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
10942 	{
10943 	  const_op = 0;
10944 	  code = LT;
10945 	}
10946       break;
10947 
10948     default:
10949       break;
10950     }
10951 
10952   *pop1 = GEN_INT (const_op);
10953   return code;
10954 }
10955 
10956 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
10957    comparison code that will be tested.
10958 
10959    The result is a possibly different comparison code to use.  *POP0 and
10960    *POP1 may be updated.
10961 
10962    It is possible that we might detect that a comparison is either always
10963    true or always false.  However, we do not perform general constant
10964    folding in combine, so this knowledge isn't useful.  Such tautologies
10965    should have been detected earlier.  Hence we ignore all such cases.  */
10966 
10967 static enum rtx_code
10968 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
10969 {
10970   rtx op0 = *pop0;
10971   rtx op1 = *pop1;
10972   rtx tem, tem1;
10973   int i;
10974   enum machine_mode mode, tmode;
10975 
10976   /* Try a few ways of applying the same transformation to both operands.  */
10977   while (1)
10978     {
10979 #ifndef WORD_REGISTER_OPERATIONS
10980       /* The test below this one won't handle SIGN_EXTENDs on these machines,
10981 	 so check specially.  */
10982       if (code != GTU && code != GEU && code != LTU && code != LEU
10983 	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
10984 	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
10985 	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
10986 	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
10987 	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
10988 	  && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
10989 	      == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
10990 	  && CONST_INT_P (XEXP (op0, 1))
10991 	  && XEXP (op0, 1) == XEXP (op1, 1)
10992 	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
10993 	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
10994 	  && (INTVAL (XEXP (op0, 1))
10995 	      == (GET_MODE_PRECISION (GET_MODE (op0))
10996 		  - (GET_MODE_PRECISION
10997 		     (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
10998 	{
10999 	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11000 	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11001 	}
11002 #endif
11003 
11004       /* If both operands are the same constant shift, see if we can ignore the
11005 	 shift.  We can if the shift is a rotate or if the bits shifted out of
11006 	 this shift are known to be zero for both inputs and if the type of
11007 	 comparison is compatible with the shift.  */
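      /* E.g., (eq (lshiftrt X 2) (lshiftrt Y 2)) can become (eq X Y)
	 when the low two bits of both X and Y are known to be zero
	 (illustrative).  */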
11008       if (GET_CODE (op0) == GET_CODE (op1)
11009 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11010 	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11011 	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11012 		  && (code != GT && code != LT && code != GE && code != LE))
11013 	      || (GET_CODE (op0) == ASHIFTRT
11014 		  && (code != GTU && code != LTU
11015 		      && code != GEU && code != LEU)))
11016 	  && CONST_INT_P (XEXP (op0, 1))
11017 	  && INTVAL (XEXP (op0, 1)) >= 0
11018 	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11019 	  && XEXP (op0, 1) == XEXP (op1, 1))
11020 	{
11021 	  enum machine_mode mode = GET_MODE (op0);
11022 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11023 	  int shift_count = INTVAL (XEXP (op0, 1));
11024 
11025 	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11026 	    mask &= (mask >> shift_count) << shift_count;
11027 	  else if (GET_CODE (op0) == ASHIFT)
11028 	    mask = (mask & (mask << shift_count)) >> shift_count;
11029 
11030 	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11031 	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11032 	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11033 	  else
11034 	    break;
11035 	}
11036 
11037       /* If both operands are AND's of a paradoxical SUBREG by constant, the
11038 	 SUBREGs are of the same mode, and, in both cases, the AND would
11039 	 be redundant if the comparison was done in the narrower mode,
11040 	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11041 	 and the operand's possibly nonzero bits are 0xffffff01; in that case
11042 	 if we only care about QImode, we don't need the AND).  This case
11043 	 occurs if the output mode of an scc insn is not SImode and
11044 	 STORE_FLAG_VALUE == 1 (e.g., the 386).
11045 
11046 	 Similarly, check for a case where the AND's are ZERO_EXTEND
11047 	 operations from some narrower mode even though a SUBREG is not
11048 	 present.  */
11049 
11050       else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11051 	       && CONST_INT_P (XEXP (op0, 1))
11052 	       && CONST_INT_P (XEXP (op1, 1)))
11053 	{
11054 	  rtx inner_op0 = XEXP (op0, 0);
11055 	  rtx inner_op1 = XEXP (op1, 0);
11056 	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11057 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11058 	  int changed = 0;
11059 
11060 	  if (paradoxical_subreg_p (inner_op0)
11061 	      && GET_CODE (inner_op1) == SUBREG
11062 	      && (GET_MODE (SUBREG_REG (inner_op0))
11063 		  == GET_MODE (SUBREG_REG (inner_op1)))
11064 	      && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11065 		  <= HOST_BITS_PER_WIDE_INT)
11066 	      && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11067 					     GET_MODE (SUBREG_REG (inner_op0)))))
11068 	      && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11069 					     GET_MODE (SUBREG_REG (inner_op1))))))
11070 	    {
11071 	      op0 = SUBREG_REG (inner_op0);
11072 	      op1 = SUBREG_REG (inner_op1);
11073 
11074 	      /* The resulting comparison is always unsigned since we masked
11075 		 off the original sign bit.  */
11076 	      code = unsigned_condition (code);
11077 
11078 	      changed = 1;
11079 	    }
11080 
11081 	  else if (c0 == c1)
11082 	    for (tmode = GET_CLASS_NARROWEST_MODE
11083 		 (GET_MODE_CLASS (GET_MODE (op0)));
11084 		 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11085 	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11086 		{
11087 		  op0 = gen_lowpart (tmode, inner_op0);
11088 		  op1 = gen_lowpart (tmode, inner_op1);
11089 		  code = unsigned_condition (code);
11090 		  changed = 1;
11091 		  break;
11092 		}
11093 
11094 	  if (! changed)
11095 	    break;
11096 	}
11097 
11098       /* If both operands are NOT, we can strip off the outer operation
11099 	 and adjust the comparison code for swapped operands; similarly for
11100 	 NEG, except that this must be an equality comparison.  */
11101       else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11102 	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11103 		   && (code == EQ || code == NE)))
11104 	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11105 
11106       else
11107 	break;
11108     }
11109 
11110   /* If the first operand is a constant, swap the operands and adjust the
11111      comparison code appropriately, but don't do this if the second operand
11112      is already a constant integer.  */
11113   if (swap_commutative_operands_p (op0, op1))
11114     {
11115       tem = op0, op0 = op1, op1 = tem;
11116       code = swap_condition (code);
11117     }
11118 
11119   /* We now enter a loop during which we will try to simplify the
11120      comparison.  For the most part, we are only concerned with
11121      comparisons with zero, but some comparisons with zero may not
11122      start out looking that way.  */
11123 
11124   while (CONST_INT_P (op1))
11125     {
11126       enum machine_mode mode = GET_MODE (op0);
11127       unsigned int mode_width = GET_MODE_PRECISION (mode);
11128       unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11129       int equality_comparison_p;
11130       int sign_bit_comparison_p;
11131       int unsigned_comparison_p;
11132       HOST_WIDE_INT const_op;
11133 
11134       /* We only want to handle integral modes.  This catches VOIDmode,
11135 	 CCmode, and the floating-point modes.  An exception is that we
11136 	 can handle VOIDmode if OP0 is a COMPARE or a comparison
11137 	 operation.  */
11138 
11139       if (GET_MODE_CLASS (mode) != MODE_INT
11140 	  && ! (mode == VOIDmode
11141 		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11142 	break;
11143 
11144       /* Try to simplify the compare to constant, possibly changing the
11145 	 comparison op, and/or changing op1 to zero.  */
11146       code = simplify_compare_const (code, op0, &op1);
11147       const_op = INTVAL (op1);
11148 
11149       /* Compute some predicates to simplify code below.  */
11150 
11151       equality_comparison_p = (code == EQ || code == NE);
11152       sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11153       unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11154 			       || code == GEU);
11155 
11156       /* If this is a sign bit comparison and we can do arithmetic in
11157 	 MODE, say that we will only be needing the sign bit of OP0.  */
11158       if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11159 	op0 = force_to_mode (op0, mode,
11160 			     (unsigned HOST_WIDE_INT) 1
11161 			     << (GET_MODE_PRECISION (mode) - 1),
11162 			     0);
11163 
11164       /* Now try cases based on the opcode of OP0.  If none of the cases
11165 	 does a "continue", we exit this loop immediately after the
11166 	 switch.  */
11167 
11168       switch (GET_CODE (op0))
11169 	{
11170 	case ZERO_EXTRACT:
11171 	  /* If we are extracting a single bit from a variable position in
11172 	     a constant that has only a single bit set and are comparing it
11173 	     with zero, we can convert this into an equality comparison
11174 	     between the position and the location of the single bit.  */
11175 	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11176 	     have already reduced the shift count modulo the word size.  */
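	  /* E.g., (eq (zero_extract (const_int 8) (const_int 1) X) 0)
	     becomes (ne X 3), since bit 3 is the only bit set in the
	     constant (position shown for !BITS_BIG_ENDIAN; illustrative).  */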
11177 	  if (!SHIFT_COUNT_TRUNCATED
11178 	      && CONST_INT_P (XEXP (op0, 0))
11179 	      && XEXP (op0, 1) == const1_rtx
11180 	      && equality_comparison_p && const_op == 0
11181 	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11182 	    {
11183 	      if (BITS_BIG_ENDIAN)
11184 		i = BITS_PER_WORD - 1 - i;
11185 
11186 	      op0 = XEXP (op0, 2);
11187 	      op1 = GEN_INT (i);
11188 	      const_op = i;
11189 
11190 	      /* Result is nonzero iff shift count is equal to I.  */
11191 	      code = reverse_condition (code);
11192 	      continue;
11193 	    }
11194 
11195 	  /* ... fall through ...  */
11196 
11197 	case SIGN_EXTRACT:
11198 	  tem = expand_compound_operation (op0);
11199 	  if (tem != op0)
11200 	    {
11201 	      op0 = tem;
11202 	      continue;
11203 	    }
11204 	  break;
11205 
11206 	case NOT:
11207 	  /* If testing for equality, we can take the NOT of the constant.  */
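	  /* E.g., (eq (not X) 7) becomes (eq X -8), the NOT of 7.  */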
11208 	  if (equality_comparison_p
11209 	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11210 	    {
11211 	      op0 = XEXP (op0, 0);
11212 	      op1 = tem;
11213 	      continue;
11214 	    }
11215 
11216 	  /* If just looking at the sign bit, reverse the sense of the
11217 	     comparison.  */
11218 	  if (sign_bit_comparison_p)
11219 	    {
11220 	      op0 = XEXP (op0, 0);
11221 	      code = (code == GE ? LT : GE);
11222 	      continue;
11223 	    }
11224 	  break;
11225 
11226 	case NEG:
11227 	  /* If testing for equality, we can take the NEG of the constant.  */
11228 	  if (equality_comparison_p
11229 	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11230 	    {
11231 	      op0 = XEXP (op0, 0);
11232 	      op1 = tem;
11233 	      continue;
11234 	    }
11235 
11236 	  /* The remaining cases only apply to comparisons with zero.  */
11237 	  if (const_op != 0)
11238 	    break;
11239 
11240 	  /* When X is ABS or is known positive,
11241 	     (neg X) is < 0 if and only if X != 0.  */
11242 
11243 	  if (sign_bit_comparison_p
11244 	      && (GET_CODE (XEXP (op0, 0)) == ABS
11245 		  || (mode_width <= HOST_BITS_PER_WIDE_INT
11246 		      && (nonzero_bits (XEXP (op0, 0), mode)
11247 			  & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11248 			 == 0)))
11249 	    {
11250 	      op0 = XEXP (op0, 0);
11251 	      code = (code == LT ? NE : EQ);
11252 	      continue;
11253 	    }
11254 
11255 	  /* If we have NEG of something whose two high-order bits are the
11256 	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
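	  /* E.g., (lt (neg X) 0) becomes (gt X 0); the two matching
	     high-order bits ensure the negation cannot overflow.  */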
11257 	  if (num_sign_bit_copies (op0, mode) >= 2)
11258 	    {
11259 	      op0 = XEXP (op0, 0);
11260 	      code = swap_condition (code);
11261 	      continue;
11262 	    }
11263 	  break;
11264 
11265 	case ROTATE:
11266 	  /* If we are testing equality and our count is a constant, we
11267 	     can perform the inverse operation on our RHS.  */
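	  /* E.g., (eq (rotate X 3) C) becomes (eq X (rotatert C 3)).  */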
11268 	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11269 	      && (tem = simplify_binary_operation (ROTATERT, mode,
11270 						   op1, XEXP (op0, 1))) != 0)
11271 	    {
11272 	      op0 = XEXP (op0, 0);
11273 	      op1 = tem;
11274 	      continue;
11275 	    }
11276 
11277 	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11278 	     a particular bit.  Convert it to an AND of a constant of that
11279 	     bit.  This will be converted into a ZERO_EXTRACT.  */
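	  /* E.g., (lt (rotate:SI X 1) 0) tests bit 30 of X and becomes
	     (ne (and X (const_int 0x40000000)) 0) (illustrative).  */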
11280 	  if (const_op == 0 && sign_bit_comparison_p
11281 	      && CONST_INT_P (XEXP (op0, 1))
11282 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
11283 	    {
11284 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11285 					    ((unsigned HOST_WIDE_INT) 1
11286 					     << (mode_width - 1
11287 						 - INTVAL (XEXP (op0, 1)))));
11288 	      code = (code == LT ? NE : EQ);
11289 	      continue;
11290 	    }
11291 
11292 	  /* Fall through.  */
11293 
11294 	case ABS:
11295 	  /* ABS is ignorable inside an equality comparison with zero.  */
11296 	  if (const_op == 0 && equality_comparison_p)
11297 	    {
11298 	      op0 = XEXP (op0, 0);
11299 	      continue;
11300 	    }
11301 	  break;
11302 
11303 	case SIGN_EXTEND:
11304 	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11305 	     (compare FOO CONST) if CONST fits in FOO's mode and we
11306 	     are either testing inequality or have an unsigned
11307 	     comparison with ZERO_EXTEND or a signed comparison with
11308 	     SIGN_EXTEND.  But don't do it if we don't have a compare
11309 	     insn of the given mode, since we'd have to revert it
11310 	     later on, and then we wouldn't know whether to sign- or
11311 	     zero-extend.  */
11312 	  mode = GET_MODE (XEXP (op0, 0));
11313 	  if (GET_MODE_CLASS (mode) == MODE_INT
11314 	      && ! unsigned_comparison_p
11315 	      && HWI_COMPUTABLE_MODE_P (mode)
11316 	      && trunc_int_for_mode (const_op, mode) == const_op
11317 	      && have_insn_for (COMPARE, mode))
11318 	    {
11319 	      op0 = XEXP (op0, 0);
11320 	      continue;
11321 	    }
11322 	  break;
11323 
11324 	case SUBREG:
11325 	  /* Check for the case where we are comparing A - C1 with C2, that is
11326 
11327 	       (subreg:MODE (plus (A) (-C1))) op (C2)
11328 
11329 	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
11330 	     comparison in the wider mode.  One of the following two conditions
11331 	     must be true in order for this to be valid:
11332 
11333 	       1. The mode extension results in the same bit pattern being added
11334 		  on both sides and the comparison is equality or unsigned.  As
11335 		  C2 has been truncated to fit in MODE, the pattern can only be
11336 		  all 0s or all 1s.
11337 
11338 	       2. The mode extension results in the sign bit being copied on
11339 		  each side.
11340 
11341 	     The difficulty here is that we have predicates for A but not for
11342 	     (A - C1) so we need to check that C1 is within proper bounds so
11343 	     as to perturb A as little as possible.  */
11344 
11345 	  if (mode_width <= HOST_BITS_PER_WIDE_INT
11346 	      && subreg_lowpart_p (op0)
11347 	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
11348 	      && GET_CODE (SUBREG_REG (op0)) == PLUS
11349 	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11350 	    {
11351 	      enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11352 	      rtx a = XEXP (SUBREG_REG (op0), 0);
11353 	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11354 
11355 	      if ((c1 > 0
11356 		   && (unsigned HOST_WIDE_INT) c1
11357 		       < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11358 		   && (equality_comparison_p || unsigned_comparison_p)
11359 		   /* (A - C1) zero-extends if it is positive and sign-extends
11360 		      if it is negative, C2 both zero- and sign-extends.  */
11361 		   && ((0 == (nonzero_bits (a, inner_mode)
11362 			      & ~GET_MODE_MASK (mode))
11363 			&& const_op >= 0)
11364 		       /* (A - C1) sign-extends if it is positive and 1-extends
11365 			  if it is negative, C2 both sign- and 1-extends.  */
11366 		       || (num_sign_bit_copies (a, inner_mode)
11367 			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11368 					     - mode_width)
11369 			   && const_op < 0)))
11370 		  || ((unsigned HOST_WIDE_INT) c1
11371 		       < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11372 		      /* (A - C1) always sign-extends, like C2.  */
11373 		      && num_sign_bit_copies (a, inner_mode)
11374 			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11375 					   - (mode_width - 1))))
11376 		{
11377 		  op0 = SUBREG_REG (op0);
11378 		  continue;
11379 		}
11380 	    }
11381 
11382 	  /* If the inner mode is narrower and we are extracting the low part,
11383 	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
11384 	  if (subreg_lowpart_p (op0)
11385 	      && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
11386 	    /* Fall through */ ;
11387 	  else
11388 	    break;
11389 
11390 	  /* ... fall through ...  */
11391 
11392 	case ZERO_EXTEND:
11393 	  mode = GET_MODE (XEXP (op0, 0));
11394 	  if (GET_MODE_CLASS (mode) == MODE_INT
11395 	      && (unsigned_comparison_p || equality_comparison_p)
11396 	      && HWI_COMPUTABLE_MODE_P (mode)
11397 	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
11398 	      && const_op >= 0
11399 	      && have_insn_for (COMPARE, mode))
11400 	    {
11401 	      op0 = XEXP (op0, 0);
11402 	      continue;
11403 	    }
11404 	  break;
11405 
11406 	case PLUS:
11407 	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
11408 	     this for equality comparisons due to pathological cases involving
11409 	     overflows.  */
11410 	  if (equality_comparison_p
11411 	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
11412 							op1, XEXP (op0, 1))))
11413 	    {
11414 	      op0 = XEXP (op0, 0);
11415 	      op1 = tem;
11416 	      continue;
11417 	    }
11418 
11419 	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
11420 	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11421 	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11422 	    {
11423 	      op0 = XEXP (XEXP (op0, 0), 0);
11424 	      code = (code == LT ? EQ : NE);
11425 	      continue;
11426 	    }
11427 	  break;
11428 
11429 	case MINUS:
11430 	  /* We used to optimize signed comparisons against zero, but that
11431 	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
11432 	     arrive here as equality comparisons, while (GEU, LTU) have
11433 	     already been optimized away.  No need to special-case them.  */
11434 
11435 	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
11436 	     (eq B (minus A C)), whichever simplifies.  We can only do
11437 	     this for equality comparisons due to pathological cases involving
11438 	     overflows.  */
11439 	  if (equality_comparison_p
11440 	      && 0 != (tem = simplify_binary_operation (PLUS, mode,
11441 							XEXP (op0, 1), op1)))
11442 	    {
11443 	      op0 = XEXP (op0, 0);
11444 	      op1 = tem;
11445 	      continue;
11446 	    }
11447 
11448 	  if (equality_comparison_p
11449 	      && 0 != (tem = simplify_binary_operation (MINUS, mode,
11450 							XEXP (op0, 0), op1)))
11451 	    {
11452 	      op0 = XEXP (op0, 1);
11453 	      op1 = tem;
11454 	      continue;
11455 	    }
11456 
11457 	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11458 	     of bits in X minus 1, is one iff X > 0.  */
11459 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11460 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11461 	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11462 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11463 	    {
11464 	      op0 = XEXP (op0, 1);
11465 	      code = (code == GE ? LE : GT);
11466 	      continue;
11467 	    }
11468 	  break;
11469 
11470 	case XOR:
11471 	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
11472 	     if C is zero or B is a constant.  */
11473 	  if (equality_comparison_p
11474 	      && 0 != (tem = simplify_binary_operation (XOR, mode,
11475 							XEXP (op0, 1), op1)))
11476 	    {
11477 	      op0 = XEXP (op0, 0);
11478 	      op1 = tem;
11479 	      continue;
11480 	    }
11481 	  break;
11482 
11483 	case EQ:  case NE:
11484 	case UNEQ:  case LTGT:
11485 	case LT:  case LTU:  case UNLT:  case LE:  case LEU:  case UNLE:
11486 	case GT:  case GTU:  case UNGT:  case GE:  case GEU:  case UNGE:
11487 	case UNORDERED: case ORDERED:
11488 	  /* We can't do anything if OP0 is a condition code value, rather
11489 	     than an actual data value.  */
11490 	  if (const_op != 0
11491 	      || CC0_P (XEXP (op0, 0))
11492 	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11493 	    break;
11494 
11495 	  /* Get the two operands being compared.  */
11496 	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11497 	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11498 	  else
11499 	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11500 
11501 	  /* Check for the cases where we simply want the result of the
11502 	     earlier test or the opposite of that result.  */
11503 	  if (code == NE || code == EQ
11504 	      || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
11505 		  && (code == LT || code == GE)))
11506 	    {
11507 	      enum rtx_code new_code;
11508 	      if (code == LT || code == NE)
11509 		new_code = GET_CODE (op0);
11510 	      else
11511 		new_code = reversed_comparison_code (op0, NULL);
11512 
11513 	      if (new_code != UNKNOWN)
11514 		{
11515 		  code = new_code;
11516 		  op0 = tem;
11517 		  op1 = tem1;
11518 		  continue;
11519 		}
11520 	    }
11521 	  break;
11522 
11523 	case IOR:
11524 	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11525 	     iff X <= 0.  */
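	  /* E.g., for X == 0 the IOR is (-1 | 0) == -1, whose sign bit is
	     set, while for X > 0 neither X - 1 nor X has the sign bit set;
	     so (lt ... 0) becomes (le X 0).  */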
11526 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11527 	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11528 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11529 	    {
11530 	      op0 = XEXP (op0, 1);
11531 	      code = (code == GE ? GT : LE);
11532 	      continue;
11533 	    }
11534 	  break;
11535 
11536 	case AND:
11537 	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
11538 	     will be converted to a ZERO_EXTRACT later.  */
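	  /* E.g., (eq (and (ashift 1 X) Y) 0), a test of bit X of Y,
	     becomes (eq (and (lshiftrt Y X) 1) 0).  */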
11539 	  if (const_op == 0 && equality_comparison_p
11540 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
11541 	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11542 	    {
11543 	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11544 				      XEXP (XEXP (op0, 0), 1));
11545 	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11546 	      continue;
11547 	    }
11548 
11549 	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11550 	     zero and X is a comparison and C1 and C2 describe only bits set
11551 	     in STORE_FLAG_VALUE, we can compare with X.  */
11552 	  if (const_op == 0 && equality_comparison_p
11553 	      && mode_width <= HOST_BITS_PER_WIDE_INT
11554 	      && CONST_INT_P (XEXP (op0, 1))
11555 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
11556 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11557 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
11558 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
11559 	    {
11560 	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11561 		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
11562 	      if ((~STORE_FLAG_VALUE & mask) == 0
11563 		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
11564 		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
11565 			  && COMPARISON_P (tem))))
11566 		{
11567 		  op0 = XEXP (XEXP (op0, 0), 0);
11568 		  continue;
11569 		}
11570 	    }
11571 
11572 	  /* If we are doing an equality comparison of an AND of a bit equal
11573 	     to the sign bit, replace this with a LT or GE comparison of
11574 	     the underlying value.  */
11575 	  if (equality_comparison_p
11576 	      && const_op == 0
11577 	      && CONST_INT_P (XEXP (op0, 1))
11578 	      && mode_width <= HOST_BITS_PER_WIDE_INT
11579 	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
11580 		  == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11581 	    {
11582 	      op0 = XEXP (op0, 0);
11583 	      code = (code == EQ ? GE : LT);
11584 	      continue;
11585 	    }
11586 
11587 	  /* If this AND operation is really a ZERO_EXTEND from a narrower
11588 	     mode, the constant fits within that mode, and this is either an
11589 	     equality or unsigned comparison, try to do this comparison in
11590 	     the narrower mode.
11591 
11592 	     Note that in:
11593 
11594 	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
11595 	     -> (ne:DI (reg:SI 4) (const_int 0))
11596 
11597 	     the transformation is invalid unless TRULY_NOOP_TRUNCATION
11598 	     allows it or the register is known to hold a value of the
11599 	     required mode.  */
11600 	  if ((equality_comparison_p || unsigned_comparison_p)
11601 	      && CONST_INT_P (XEXP (op0, 1))
11602 	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
11603 				   & GET_MODE_MASK (mode))
11604 				  + 1)) >= 0
11605 	      && const_op >> i == 0
11606 	      && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
11607 	      && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0))
11608 		  || (REG_P (XEXP (op0, 0))
11609 		      && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
11610 	    {
11611 	      op0 = gen_lowpart (tmode, XEXP (op0, 0));
11612 	      continue;
11613 	    }
11614 
11615 	  /* If this is (and:M1 (subreg:M2 X 0) (const_int C1)) where C1
11616 	     fits in both M1 and M2 and the SUBREG is either paradoxical
11617 	     or represents the low part, permute the SUBREG and the AND
11618 	     and try again.  */
11619 	  if (GET_CODE (XEXP (op0, 0)) == SUBREG)
11620 	    {
11621 	      unsigned HOST_WIDE_INT c1;
11622 	      tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
11623 	      /* Require an integral mode, to avoid creating something like
11624 		 (AND:SF ...).  */
11625 	      if (SCALAR_INT_MODE_P (tmode)
11626 		  /* It is unsafe to commute the AND into the SUBREG if the
11627 		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
11628 		     not defined.  As originally written the upper bits
11629 		     have a defined value due to the AND operation.
11630 		     However, if we commute the AND inside the SUBREG then
11631 		     they no longer have defined values and the meaning of
11632 		     the code has been changed.  */
11633 		  && (0
11634 #ifdef WORD_REGISTER_OPERATIONS
11635 		      || (mode_width > GET_MODE_PRECISION (tmode)
11636 			  && mode_width <= BITS_PER_WORD)
11637 #endif
11638 		      || (mode_width <= GET_MODE_PRECISION (tmode)
11639 			  && subreg_lowpart_p (XEXP (op0, 0))))
11640 		  && CONST_INT_P (XEXP (op0, 1))
11641 		  && mode_width <= HOST_BITS_PER_WIDE_INT
11642 		  && HWI_COMPUTABLE_MODE_P (tmode)
11643 		  && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0
11644 		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
11645 		  && c1 != mask
11646 		  && c1 != GET_MODE_MASK (tmode))
11647 		{
11648 		  op0 = simplify_gen_binary (AND, tmode,
11649 					     SUBREG_REG (XEXP (op0, 0)),
11650 					     gen_int_mode (c1, tmode));
11651 		  op0 = gen_lowpart (mode, op0);
11652 		  continue;
11653 		}
11654 	    }
11655 
11656 	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
11657 	  if (const_op == 0 && equality_comparison_p
11658 	      && XEXP (op0, 1) == const1_rtx
11659 	      && GET_CODE (XEXP (op0, 0)) == NOT)
11660 	    {
11661 	      op0 = simplify_and_const_int (NULL_RTX, mode,
11662 					    XEXP (XEXP (op0, 0), 0), 1);
11663 	      code = (code == NE ? EQ : NE);
11664 	      continue;
11665 	    }
11666 
11667 	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
11668 	     (eq (and (lshiftrt X) 1) 0).
11669 	     Also handle the case where (not X) is expressed using xor.  */
11670 	  if (const_op == 0 && equality_comparison_p
11671 	      && XEXP (op0, 1) == const1_rtx
11672 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
11673 	    {
11674 	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
11675 	      rtx shift_count = XEXP (XEXP (op0, 0), 1);
11676 
11677 	      if (GET_CODE (shift_op) == NOT
11678 		  || (GET_CODE (shift_op) == XOR
11679 		      && CONST_INT_P (XEXP (shift_op, 1))
11680 		      && CONST_INT_P (shift_count)
11681 		      && HWI_COMPUTABLE_MODE_P (mode)
11682 		      && (UINTVAL (XEXP (shift_op, 1))
11683 			  == (unsigned HOST_WIDE_INT) 1
11684 			       << INTVAL (shift_count))))
11685 		{
11686 		  op0
11687 		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
11688 		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11689 		  code = (code == NE ? EQ : NE);
11690 		  continue;
11691 		}
11692 	    }
11693 	  break;
11694 
11695 	case ASHIFT:
11696 	  /* If we have (compare (ashift FOO N) (const_int C)) and
11697 	     the high order N bits of FOO (N+1 if an inequality comparison)
11698 	     are known to be zero, we can do this by comparing FOO with C
11699 	     shifted right N bits so long as the low-order N bits of C are
11700 	     zero.  */
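	  /* E.g., (eq (ashift X 2) 20) becomes (eq X 5) when the two
	     high-order bits of X are known to be zero; 20 has its two
	     low-order bits clear, so no information is lost.  */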
11701 	  if (CONST_INT_P (XEXP (op0, 1))
11702 	      && INTVAL (XEXP (op0, 1)) >= 0
11703 	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
11704 		  < HOST_BITS_PER_WIDE_INT)
11705 	      && (((unsigned HOST_WIDE_INT) const_op
11706 		   & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
11707 		      - 1)) == 0)
11708 	      && mode_width <= HOST_BITS_PER_WIDE_INT
11709 	      && (nonzero_bits (XEXP (op0, 0), mode)
11710 		  & ~(mask >> (INTVAL (XEXP (op0, 1))
11711 			       + ! equality_comparison_p))) == 0)
11712 	    {
11713 	      /* We must perform a logical shift, not an arithmetic one,
11714 		 as we want the top N bits of C to be zero.  */
11715 	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
11716 
11717 	      temp >>= INTVAL (XEXP (op0, 1));
11718 	      op1 = gen_int_mode (temp, mode);
11719 	      op0 = XEXP (op0, 0);
11720 	      continue;
11721 	    }
11722 
11723 	  /* If we are doing a sign bit comparison, it means we are testing
11724 	     a particular bit.  Convert it to the appropriate AND.  */
11725 	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
11726 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
11727 	    {
11728 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11729 					    ((unsigned HOST_WIDE_INT) 1
11730 					     << (mode_width - 1
11731 						 - INTVAL (XEXP (op0, 1)))));
11732 	      code = (code == LT ? NE : EQ);
11733 	      continue;
11734 	    }
11735 
11736 	  /* If this is an equality comparison with zero and we are shifting
11737 	     the low bit to the sign bit, we can convert this to an AND of the
11738 	     low-order bit.  */
11739 	  if (const_op == 0 && equality_comparison_p
11740 	      && CONST_INT_P (XEXP (op0, 1))
11741 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11742 	    {
11743 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
11744 	      continue;
11745 	    }
11746 	  break;
11747 
11748 	case ASHIFTRT:
11749 	  /* If this is an equality comparison with zero, we can do this
11750 	     as a logical shift, which might be much simpler.  */
11751 	  if (equality_comparison_p && const_op == 0
11752 	      && CONST_INT_P (XEXP (op0, 1)))
11753 	    {
11754 	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
11755 					  XEXP (op0, 0),
11756 					  INTVAL (XEXP (op0, 1)));
11757 	      continue;
11758 	    }
11759 
11760 	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
11761 	     do the comparison in a narrower mode.  */
11762 	  if (! unsigned_comparison_p
11763 	      && CONST_INT_P (XEXP (op0, 1))
11764 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
11765 	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11766 	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11767 					 MODE_INT, 1)) != BLKmode
11768 	      && (((unsigned HOST_WIDE_INT) const_op
11769 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
11770 		  <= GET_MODE_MASK (tmode)))
11771 	    {
11772 	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
11773 	      continue;
11774 	    }
11775 
11776 	  /* Likewise if OP0 is a PLUS of a sign extension with a
11777 	     constant, which is usually represented with the PLUS
11778 	     between the shifts.  */
11779 	  if (! unsigned_comparison_p
11780 	      && CONST_INT_P (XEXP (op0, 1))
11781 	      && GET_CODE (XEXP (op0, 0)) == PLUS
11782 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11783 	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
11784 	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
11785 	      && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
11786 					 MODE_INT, 1)) != BLKmode
11787 	      && (((unsigned HOST_WIDE_INT) const_op
11788 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
11789 		  <= GET_MODE_MASK (tmode)))
11790 	    {
11791 	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
11792 	      rtx add_const = XEXP (XEXP (op0, 0), 1);
11793 	      rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
11794 						   add_const, XEXP (op0, 1));
11795 
11796 	      op0 = simplify_gen_binary (PLUS, tmode,
11797 					 gen_lowpart (tmode, inner),
11798 					 new_const);
11799 	      continue;
11800 	    }
11801 
11802 	  /* ... fall through ...  */
11803 	case LSHIFTRT:
11804 	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
11805 	     the low order N bits of FOO are known to be zero, we can do this
11806 	     by comparing FOO with C shifted left N bits so long as no
11807 	     overflow occurs.  Even if the low order N bits of FOO aren't known
11808 	     to be zero, if the comparison is >= or < we can use the same
11809 	     optimization and for > or <= by setting all the low
11810 	     order N bits in the comparison constant.  */
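	  /* E.g., (geu (lshiftrt X 2) 5) becomes (geu X 20), while
	     (leu (lshiftrt X 2) 5) becomes (leu X 23) by also setting
	     the two low-order bits of the constant (illustrative).  */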
11811 	  if (CONST_INT_P (XEXP (op0, 1))
11812 	      && INTVAL (XEXP (op0, 1)) > 0
11813 	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11814 	      && mode_width <= HOST_BITS_PER_WIDE_INT
11815 	      && (((unsigned HOST_WIDE_INT) const_op
11816 		   + (GET_CODE (op0) != LSHIFTRT
11817 		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
11818 			 + 1)
11819 		      : 0))
11820 		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
11821 	    {
11822 	      unsigned HOST_WIDE_INT low_bits
11823 		= (nonzero_bits (XEXP (op0, 0), mode)
11824 		   & (((unsigned HOST_WIDE_INT) 1
11825 		       << INTVAL (XEXP (op0, 1))) - 1));
11826 	      if (low_bits == 0 || !equality_comparison_p)
11827 		{
11828 		  /* If the shift was logical, then we must make the condition
11829 		     unsigned.  */
11830 		  if (GET_CODE (op0) == LSHIFTRT)
11831 		    code = unsigned_condition (code);
11832 
11833 		  const_op <<= INTVAL (XEXP (op0, 1));
11834 		  if (low_bits != 0
11835 		      && (code == GT || code == GTU
11836 			  || code == LE || code == LEU))
11837 		    const_op
11838 		      |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
11839 		  op1 = GEN_INT (const_op);
11840 		  op0 = XEXP (op0, 0);
11841 		  continue;
11842 		}
11843 	    }
11844 
11845 	  /* If we are using this shift to extract just the sign bit, we
11846 	     can replace this with an LT or GE comparison.  */
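	  /* E.g., (ne (lshiftrt:SI X 31) 0) becomes (lt X 0).  */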
11847 	  if (const_op == 0
11848 	      && (equality_comparison_p || sign_bit_comparison_p)
11849 	      && CONST_INT_P (XEXP (op0, 1))
11850 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
11851 	    {
11852 	      op0 = XEXP (op0, 0);
11853 	      code = (code == NE || code == GT ? LT : GE);
11854 	      continue;
11855 	    }
11856 	  break;
11857 
11858 	default:
11859 	  break;
11860 	}
11861 
11862       break;
11863     }
11864 
11865   /* Now make any compound operations involved in this comparison.  Then,
11866      check for an outermost SUBREG on OP0 that is not doing anything or is
11867      paradoxical.  The latter transformation must only be performed when
11868      it is known that the "extra" bits will be the same in op0 and op1 or
11869      that they don't matter.  There are three cases to consider:
11870 
11871      1. SUBREG_REG (op0) is a register.  In this case the bits are don't
11872      care bits and we can assume they have any convenient value.  So
11873      making the transformation is safe.
11874 
11875      2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
11876      In this case the upper bits of op0 are undefined.  We should not make
11877      the simplification in that case as we do not know the contents of
11878      those bits.
11879 
11880      3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
11881      UNKNOWN.  In that case we know those bits are zeros or ones.  We must
11882      also be sure that they are the same as the upper bits of op1.
11883 
11884      We can never remove a SUBREG for a non-equality comparison because
11885      the sign bit is in a different place in the underlying object.  */
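  /* For instance, under case 1 above,
     (eq (subreg:DI (reg:SI X) 0) (const_int 0)) may be rewritten as
     (eq (reg:SI X) (const_int 0)), since the extra bits of a paradoxical
     SUBREG of a REG can be assumed to hold whatever value is convenient.  */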
11886 
11887   op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
11888   op1 = make_compound_operation (op1, SET);
11889 
11890   if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
11891       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
11892       && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
11893       && (code == NE || code == EQ))
11894     {
11895       if (paradoxical_subreg_p (op0))
11896 	{
11897 	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
11898 	     implemented.  */
11899 	  if (REG_P (SUBREG_REG (op0)))
11900 	    {
11901 	      op0 = SUBREG_REG (op0);
11902 	      op1 = gen_lowpart (GET_MODE (op0), op1);
11903 	    }
11904 	}
11905       else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
11906 		<= HOST_BITS_PER_WIDE_INT)
11907 	       && (nonzero_bits (SUBREG_REG (op0),
11908 				 GET_MODE (SUBREG_REG (op0)))
11909 		   & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11910 	{
11911 	  tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
11912 
11913 	  if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
11914 	       & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
11915 	    op0 = SUBREG_REG (op0), op1 = tem;
11916 	}
11917     }
11918 
11919   /* We now do the opposite procedure: Some machines don't have compare
11920      insns in all modes.  If OP0's mode is an integer mode smaller than a
11921      word and we can't do a compare in that mode, see if there is a larger
11922      mode for which we can do the compare.  There are a number of cases in
11923      which we can use the wider mode.  */
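  /* For instance, if the target has no HImode compare but X is known to
     have no bits set outside the HImode mask, (eq (reg:HI X) (const_int 5))
     can be carried out in SImode as a comparison of (zero_extend:SI X)
     against (const_int 5).  */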
11924 
11925   mode = GET_MODE (op0);
11926   if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
11927       && GET_MODE_SIZE (mode) < UNITS_PER_WORD
11928       && ! have_insn_for (COMPARE, mode))
11929     for (tmode = GET_MODE_WIDER_MODE (mode);
11930 	 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
11931 	 tmode = GET_MODE_WIDER_MODE (tmode))
11932       if (have_insn_for (COMPARE, tmode))
11933 	{
11934 	  int zero_extended;
11935 
11936 	  /* If this is a test for negative, we can make an explicit
11937 	     test of the sign bit.  Test this first so we can use
11938 	     a paradoxical subreg to extend OP0.  */
11939 
11940 	  if (op1 == const0_rtx && (code == LT || code == GE)
11941 	      && HWI_COMPUTABLE_MODE_P (mode))
11942 	    {
11943 	      op0 = simplify_gen_binary (AND, tmode,
11944 					 gen_lowpart (tmode, op0),
11945 					 GEN_INT ((unsigned HOST_WIDE_INT) 1
11946 						  << (GET_MODE_BITSIZE (mode)
11947 						      - 1)));
11948 	      code = (code == LT) ? NE : EQ;
11949 	      break;
11950 	    }
11951 
11952 	  /* If the only nonzero bits in OP0 and OP1 are those in the
11953 	     narrower mode and this is an equality or unsigned comparison,
11954 	     we can use the wider mode.  Similarly for sign-extended
11955 	     values, in which case it is true for all comparisons.  */
11956 	  zero_extended = ((code == EQ || code == NE
11957 			    || code == GEU || code == GTU
11958 			    || code == LEU || code == LTU)
11959 			   && (nonzero_bits (op0, tmode)
11960 			       & ~GET_MODE_MASK (mode)) == 0
11961 			   && ((CONST_INT_P (op1)
11962 				|| (nonzero_bits (op1, tmode)
11963 				    & ~GET_MODE_MASK (mode)) == 0)));
11964 
11965 	  if (zero_extended
11966 	      || ((num_sign_bit_copies (op0, tmode)
11967 		   > (unsigned int) (GET_MODE_PRECISION (tmode)
11968 				     - GET_MODE_PRECISION (mode)))
11969 		  && (num_sign_bit_copies (op1, tmode)
11970 		      > (unsigned int) (GET_MODE_PRECISION (tmode)
11971 					- GET_MODE_PRECISION (mode)))))
11972 	    {
11973 	      /* If OP0 is an AND and we don't have an AND in MODE either,
11974 		 make a new AND in the proper mode.  */
11975 	      if (GET_CODE (op0) == AND
11976 		  && !have_insn_for (AND, mode))
11977 		op0 = simplify_gen_binary (AND, tmode,
11978 					   gen_lowpart (tmode,
11979 							XEXP (op0, 0)),
11980 					   gen_lowpart (tmode,
11981 							XEXP (op0, 1)));
11982 	      else
11983 		{
11984 		  if (zero_extended)
11985 		    {
11986 		      op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
11987 		      op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
11988 		    }
11989 		  else
11990 		    {
11991 		      op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
11992 		      op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
11993 		    }
11994 		  break;
11995 		}
11996 	    }
11997 	}
11998 
11999   /* If this machine only supports a subset of valid comparisons, see if we
12000      can convert an unsupported one into a supported one.  */
12001   target_canonicalize_comparison (&code, &op0, &op1, 0);
12002 
12003   *pop0 = op0;
12004   *pop1 = op1;
12005 
12006   return code;
12007 }
12008 
12009 /* Utility function for record_value_for_reg.  Count number of
12010    rtxs in X.  */
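/* For example, (plus (reg A) (reg B)) counts as 3 rtxs.  Shared operands
   are counted as if the sharing were undone: (plus Y Y), with both
   operands the same rtx Y, counts as 1 + 2 * count_rtxs (Y), presumably
   to reflect the size the value could reach if substitution duplicated
   Y.  */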
12011 static int
12012 count_rtxs (rtx x)
12013 {
12014   enum rtx_code code = GET_CODE (x);
12015   const char *fmt;
12016   int i, j, ret = 1;
12017 
12018   if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12019       || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12020     {
12021       rtx x0 = XEXP (x, 0);
12022       rtx x1 = XEXP (x, 1);
12023 
12024       if (x0 == x1)
12025 	return 1 + 2 * count_rtxs (x0);
12026 
12027       if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12028 	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12029 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12030 	return 2 + 2 * count_rtxs (x0)
12031 	       + count_rtxs (x0 == XEXP (x1, 0)
12032 			     ? XEXP (x1, 1) : XEXP (x1, 0));
12033 
12034       if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12035 	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12036 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12037 	return 2 + 2 * count_rtxs (x1)
12038 	       + count_rtxs (x1 == XEXP (x0, 0)
12039 			     ? XEXP (x0, 1) : XEXP (x0, 0));
12040     }
12041 
12042   fmt = GET_RTX_FORMAT (code);
12043   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12044     if (fmt[i] == 'e')
12045       ret += count_rtxs (XEXP (x, i));
12046     else if (fmt[i] == 'E')
12047       for (j = 0; j < XVECLEN (x, i); j++)
12048 	ret += count_rtxs (XVECEXP (x, i, j));
12049 
12050   return ret;
12051 }
12052 
12053 /* Utility function for the following routine.  Called when X is part of
12054    a value being stored into last_set_value.  Sets last_set_table_tick
12055    for each register mentioned.  Similar to mention_regs in cse.c.  */
12056 
12057 static void
12058 update_table_tick (rtx x)
12059 {
12060   enum rtx_code code = GET_CODE (x);
12061   const char *fmt = GET_RTX_FORMAT (code);
12062   int i, j;
12063 
12064   if (code == REG)
12065     {
12066       unsigned int regno = REGNO (x);
12067       unsigned int endregno = END_REGNO (x);
12068       unsigned int r;
12069 
12070       for (r = regno; r < endregno; r++)
12071 	{
12072 	  reg_stat_type *rsp = &reg_stat[r];
12073 	  rsp->last_set_table_tick = label_tick;
12074 	}
12075 
12076       return;
12077     }
12078 
12079   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12080     if (fmt[i] == 'e')
12081       {
12082 	/* Check for identical subexpressions.  If x contains
12083 	   identical subexpressions we only have to traverse one of
12084 	   them.  */
12085 	if (i == 0 && ARITHMETIC_P (x))
12086 	  {
12087 	    /* Note that at this point x1 has already been
12088 	       processed.  */
12089 	    rtx x0 = XEXP (x, 0);
12090 	    rtx x1 = XEXP (x, 1);
12091 
12092 	    /* If x0 and x1 are identical then there is no need to
12093 	       process x0.  */
12094 	    if (x0 == x1)
12095 	      break;
12096 
12097 	    /* If x0 is identical to a subexpression of x1 then while
12098 	       processing x1, x0 has already been processed.  Thus we
12099 	       are done with x.  */
12100 	    if (ARITHMETIC_P (x1)
12101 		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12102 	      break;
12103 
12104 	    /* If x1 is identical to a subexpression of x0 then we
12105 	       still have to process the rest of x0.  */
12106 	    if (ARITHMETIC_P (x0)
12107 		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12108 	      {
12109 		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12110 		break;
12111 	      }
12112 	  }
12113 
12114 	update_table_tick (XEXP (x, i));
12115       }
12116     else if (fmt[i] == 'E')
12117       for (j = 0; j < XVECLEN (x, i); j++)
12118 	update_table_tick (XVECEXP (x, i, j));
12119 }
12120 
12121 /* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
12122    are saying that the register is clobbered and we no longer know its
12123    value.  If INSN is zero, don't update reg_stat[].last_set; this is
12124    only permitted with VALUE also zero and is used to invalidate the
12125    register.  */
12126 
12127 static void
12128 record_value_for_reg (rtx reg, rtx insn, rtx value)
12129 {
12130   unsigned int regno = REGNO (reg);
12131   unsigned int endregno = END_REGNO (reg);
12132   unsigned int i;
12133   reg_stat_type *rsp;
12134 
12135   /* If VALUE contains REG and we have a previous value for REG, substitute
12136      the previous value.  */
12137   if (value && insn && reg_overlap_mentioned_p (reg, value))
12138     {
12139       rtx tem;
12140 
12141       /* Set things up so get_last_value is allowed to see anything set up to
12142 	 our insn.  */
12143       subst_low_luid = DF_INSN_LUID (insn);
12144       tem = get_last_value (reg);
12145 
12146       /* If TEM is simply a binary operation with two CLOBBERs as operands,
12147 	 it isn't going to be useful and will take a lot of time to process,
12148 	 so just use the CLOBBER.  */
12149 
12150       if (tem)
12151 	{
12152 	  if (ARITHMETIC_P (tem)
12153 	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
12154 	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12155 	    tem = XEXP (tem, 0);
12156 	  else if (count_occurrences (value, reg, 1) >= 2)
12157 	    {
12158 	      /* If there are two or more occurrences of REG in VALUE,
12159 		 prevent the value from growing too much.  */
12160 	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12161 		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12162 	    }
12163 
12164 	  value = replace_rtx (copy_rtx (value), reg, tem);
12165 	}
12166     }
12167 
12168   /* For each register modified, show we don't know its value, that
12169      we don't know about its bitwise content, that its value has been
12170      updated, and that we don't know the location of the death of the
12171      register.  */
12172   for (i = regno; i < endregno; i++)
12173     {
12174       rsp = &reg_stat[i];
12175 
12176       if (insn)
12177 	rsp->last_set = insn;
12178 
12179       rsp->last_set_value = 0;
12180       rsp->last_set_mode = VOIDmode;
12181       rsp->last_set_nonzero_bits = 0;
12182       rsp->last_set_sign_bit_copies = 0;
12183       rsp->last_death = 0;
12184       rsp->truncated_to_mode = VOIDmode;
12185     }
12186 
12187   /* Mark registers that are being referenced in this value.  */
12188   if (value)
12189     update_table_tick (value);
12190 
12191   /* Now update the status of each register being set.
12192      If someone is already using this register in this block, mark it
12193      invalid, since we would otherwise confuse the register's two lives
12194      in this basic block.  Using this register is then always invalid.  In cse, we
12195      scan the table to invalidate all entries using this register, but this
12196      is too much work for us.  */
12197 
12198   for (i = regno; i < endregno; i++)
12199     {
12200       rsp = &reg_stat[i];
12201       rsp->last_set_label = label_tick;
12202       if (!insn
12203 	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12204 	rsp->last_set_invalid = 1;
12205       else
12206 	rsp->last_set_invalid = 0;
12207     }
12208 
12209   /* The value being assigned might refer to X (like in "x++;").  In that
12210      case, we must replace it with (clobber (const_int 0)) to prevent
12211      infinite loops.  */
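  /* For instance, given (set (reg X) (plus (reg X) (const_int 1))) with
     no previous value of X available to substitute above, reg X inside
     VALUE has just been marked invalid, so the recorded value becomes
     (plus (clobber (const_int 0)) (const_int 1)).  */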
12212   rsp = &reg_stat[regno];
12213   if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12214     {
12215       value = copy_rtx (value);
12216       if (!get_last_value_validate (&value, insn, label_tick, 1))
12217 	value = 0;
12218     }
12219 
12220   /* For the main register being modified, update the value, the mode, the
12221      nonzero bits, and the number of sign bit copies.  */
12222 
12223   rsp->last_set_value = value;
12224 
12225   if (value)
12226     {
12227       enum machine_mode mode = GET_MODE (reg);
12228       subst_low_luid = DF_INSN_LUID (insn);
12229       rsp->last_set_mode = mode;
12230       if (GET_MODE_CLASS (mode) == MODE_INT
12231 	  && HWI_COMPUTABLE_MODE_P (mode))
12232 	mode = nonzero_bits_mode;
12233       rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12234       rsp->last_set_sign_bit_copies
12235 	= num_sign_bit_copies (value, GET_MODE (reg));
12236     }
12237 }
12238 
12239 /* Called via note_stores from record_dead_and_set_regs to handle one
12240    SET or CLOBBER in an insn.  DATA is the instruction in which the
12241    set is occurring.  */
12242 
12243 static void
12244 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12245 {
12246   rtx record_dead_insn = (rtx) data;
12247 
12248   if (GET_CODE (dest) == SUBREG)
12249     dest = SUBREG_REG (dest);
12250 
12251   if (!record_dead_insn)
12252     {
12253       if (REG_P (dest))
12254 	record_value_for_reg (dest, NULL_RTX, NULL_RTX);
12255       return;
12256     }
12257 
12258   if (REG_P (dest))
12259     {
12260       /* If we are setting the whole register, we know its value.  Otherwise
12261 	 show that we don't know the value.  We can handle SUBREG in
12262 	 some cases.  */
12263       if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12264 	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12265       else if (GET_CODE (setter) == SET
12266 	       && GET_CODE (SET_DEST (setter)) == SUBREG
12267 	       && SUBREG_REG (SET_DEST (setter)) == dest
12268 	       && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
12269 	       && subreg_lowpart_p (SET_DEST (setter)))
12270 	record_value_for_reg (dest, record_dead_insn,
12271 			      gen_lowpart (GET_MODE (dest),
12272 						       SET_SRC (setter)));
12273       else
12274 	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12275     }
12276   else if (MEM_P (dest)
12277 	   /* Ignore pushes, they clobber nothing.  */
12278 	   && ! push_operand (dest, GET_MODE (dest)))
12279     mem_last_set = DF_INSN_LUID (record_dead_insn);
12280 }
12281 
12282 /* Update the records of when each REG was most recently set or killed
12283    for the things done by INSN.  This is the last thing done in processing
12284    INSN in the combiner loop.
12285 
12286    We update reg_stat[], in particular fields last_set, last_set_value,
12287    last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12288    last_death, and also the similar information mem_last_set (which insn
12289    most recently modified memory) and last_call_luid (which insn was the
12290    most recent subroutine call).  */
12291 
12292 static void
12293 record_dead_and_set_regs (rtx insn)
12294 {
12295   rtx link;
12296   unsigned int i;
12297 
12298   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12299     {
12300       if (REG_NOTE_KIND (link) == REG_DEAD
12301 	  && REG_P (XEXP (link, 0)))
12302 	{
12303 	  unsigned int regno = REGNO (XEXP (link, 0));
12304 	  unsigned int endregno = END_REGNO (XEXP (link, 0));
12305 
12306 	  for (i = regno; i < endregno; i++)
12307 	    {
12308 	      reg_stat_type *rsp;
12309 
12310 	      rsp = &reg_stat[i];
12311 	      rsp->last_death = insn;
12312 	    }
12313 	}
12314       else if (REG_NOTE_KIND (link) == REG_INC)
12315 	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12316     }
12317 
12318   if (CALL_P (insn))
12319     {
12320       hard_reg_set_iterator hrsi;
12321       EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
12322 	{
12323 	  reg_stat_type *rsp;
12324 
12325 	  rsp = &reg_stat[i];
12326 	  rsp->last_set_invalid = 1;
12327 	  rsp->last_set = insn;
12328 	  rsp->last_set_value = 0;
12329 	  rsp->last_set_mode = VOIDmode;
12330 	  rsp->last_set_nonzero_bits = 0;
12331 	  rsp->last_set_sign_bit_copies = 0;
12332 	  rsp->last_death = 0;
12333 	  rsp->truncated_to_mode = VOIDmode;
12334 	}
12335 
12336       last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12337 
12338       /* We can't combine into a call pattern.  Remember, though, that
12339 	 the return value register is set at this LUID.  We could
12340 	 still replace a register with the return value from the
12341 	 wrong subroutine call!  */
12342       note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12343     }
12344   else
12345     note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12346 }
12347 
12348 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12349    register present in the SUBREG, so for each such SUBREG go back and
12350    adjust nonzero and sign bit information of the registers that are
12351    known to have some zero/sign bits set.
12352 
12353    This is needed because when combine blows the SUBREGs away, the
12354    information on zero/sign bits is lost and further combines can be
12355    missed because of that.  */
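/* For example, if (subreg:SI (reg:DI R) 0) has SUBREG_PROMOTED_VAR_P set
   with unsigned promotion, the bits of R outside the SImode mask are known
   to be zero, so last_set_nonzero_bits for R is narrowed accordingly
   below.  */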
12356 
12357 static void
12358 record_promoted_value (rtx insn, rtx subreg)
12359 {
12360   struct insn_link *links;
12361   rtx set;
12362   unsigned int regno = REGNO (SUBREG_REG (subreg));
12363   enum machine_mode mode = GET_MODE (subreg);
12364 
12365   if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
12366     return;
12367 
12368   for (links = LOG_LINKS (insn); links;)
12369     {
12370       reg_stat_type *rsp;
12371 
12372       insn = links->insn;
12373       set = single_set (insn);
12374 
12375       if (! set || !REG_P (SET_DEST (set))
12376 	  || REGNO (SET_DEST (set)) != regno
12377 	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12378 	{
12379 	  links = links->next;
12380 	  continue;
12381 	}
12382 
12383       rsp = &reg_stat[regno];
12384       if (rsp->last_set == insn)
12385 	{
12386 	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg) > 0)
12387 	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12388 	}
12389 
12390       if (REG_P (SET_SRC (set)))
12391 	{
12392 	  regno = REGNO (SET_SRC (set));
12393 	  links = LOG_LINKS (insn);
12394 	}
12395       else
12396 	break;
12397     }
12398 }
12399 
12400 /* Check if X, a register, is known to contain a value already
12401    truncated to MODE.  In this case we can use a subreg to refer to
12402    the truncated value even though in the generic case we would need
12403    an explicit truncation.  */
12404 
12405 static bool
12406 reg_truncated_to_mode (enum machine_mode mode, const_rtx x)
12407 {
12408   reg_stat_type *rsp = &reg_stat[REGNO (x)];
12409   enum machine_mode truncated = rsp->truncated_to_mode;
12410 
12411   if (truncated == 0
12412       || rsp->truncation_label < label_tick_ebb_start)
12413     return false;
12414   if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12415     return true;
12416   if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
12417     return true;
12418   return false;
12419 }
12420 
12421 /* Callback for for_each_rtx.  If *P is a hard reg or a subreg, record the mode
12422    that the register is accessed in.  For non-TRULY_NOOP_TRUNCATION targets we
12423    might be able to turn a truncate into a subreg using this information.
12424    Return -1 if traversing *P is complete or 0 otherwise.  */
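/* For example, on a target where truncating SImode to QImode is not a
   no-op, seeing (subreg:QI (reg:SI R) 0) records that R is accessed in
   QImode; reg_truncated_to_mode can then report that R already holds a
   value valid for QImode access, letting a TRUNCATE become a SUBREG.  */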
12425 
12426 static int
12427 record_truncated_value (rtx *p, void *data ATTRIBUTE_UNUSED)
12428 {
12429   rtx x = *p;
12430   enum machine_mode truncated_mode;
12431   reg_stat_type *rsp;
12432 
12433   if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12434     {
12435       enum machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12436       truncated_mode = GET_MODE (x);
12437 
12438       if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12439 	return -1;
12440 
12441       if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
12442 	return -1;
12443 
12444       x = SUBREG_REG (x);
12445     }
12446   /* ??? For hard-regs we now record everything.  We might be able to
12447      optimize this using last_set_mode.  */
12448   else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12449     truncated_mode = GET_MODE (x);
12450   else
12451     return 0;
12452 
12453   rsp = &reg_stat[REGNO (x)];
12454   if (rsp->truncated_to_mode == 0
12455       || rsp->truncation_label < label_tick_ebb_start
12456       || (GET_MODE_SIZE (truncated_mode)
12457 	  < GET_MODE_SIZE (rsp->truncated_to_mode)))
12458     {
12459       rsp->truncated_to_mode = truncated_mode;
12460       rsp->truncation_label = label_tick;
12461     }
12462 
12463   return -1;
12464 }
12465 
12466 /* Callback for note_uses.  Find hardregs and subregs of pseudos and
12467    the modes they are used in.  This can help turn TRUNCATEs into
12468    SUBREGs.  */
12469 
12470 static void
12471 record_truncated_values (rtx *x, void *data ATTRIBUTE_UNUSED)
12472 {
12473   for_each_rtx (x, record_truncated_value, NULL);
12474 }
12475 
12476 /* Scan X for promoted SUBREGs.  For each one found,
12477    note what it implies to the registers used in it.  */
12478 
12479 static void
12480 check_promoted_subreg (rtx insn, rtx x)
12481 {
12482   if (GET_CODE (x) == SUBREG
12483       && SUBREG_PROMOTED_VAR_P (x)
12484       && REG_P (SUBREG_REG (x)))
12485     record_promoted_value (insn, x);
12486   else
12487     {
12488       const char *format = GET_RTX_FORMAT (GET_CODE (x));
12489       int i, j;
12490 
12491       for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12492 	switch (format[i])
12493 	  {
12494 	  case 'e':
12495 	    check_promoted_subreg (insn, XEXP (x, i));
12496 	    break;
12497 	  case 'V':
12498 	  case 'E':
12499 	    if (XVEC (x, i) != 0)
12500 	      for (j = 0; j < XVECLEN (x, i); j++)
12501 		check_promoted_subreg (insn, XVECEXP (x, i, j));
12502 	    break;
12503 	  }
12504     }
12505 }
12506 
12507 /* Verify that all the registers and memory references mentioned in *LOC are
12508    still valid.  *LOC was part of a value set in INSN when label_tick was
12509    equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
12510    the invalid references with (clobber (const_int 0)) and return 1.  This
12511    replacement is useful because we often can get useful information about
12512    the form of a value (e.g., if it was produced by a shift that always
12513    produces -1 or 0) even though we don't know exactly what registers it
12514    was produced from.  */
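/* For example, if the recorded value was (plus (reg A) (reg B)) and A has
   since become invalid, replacement yields
   (plus (clobber (const_int 0)) (reg B)), which still reveals the overall
   form of the value.  */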
12515 
12516 static int
12517 get_last_value_validate (rtx *loc, rtx insn, int tick, int replace)
12518 {
12519   rtx x = *loc;
12520   const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12521   int len = GET_RTX_LENGTH (GET_CODE (x));
12522   int i, j;
12523 
12524   if (REG_P (x))
12525     {
12526       unsigned int regno = REGNO (x);
12527       unsigned int endregno = END_REGNO (x);
12528       unsigned int j;
12529 
12530       for (j = regno; j < endregno; j++)
12531 	{
12532 	  reg_stat_type *rsp = &reg_stat[j];
12533 	  if (rsp->last_set_invalid
12534 	      /* If this is a pseudo-register that was only set once and not
12535 		 live at the beginning of the function, it is always valid.  */
12536 	      || (! (regno >= FIRST_PSEUDO_REGISTER
12537 		     && REG_N_SETS (regno) == 1
12538 		     && (!REGNO_REG_SET_P
12539 			 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno)))
12540 		  && rsp->last_set_label > tick))
12541 	  {
12542 	    if (replace)
12543 	      *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12544 	    return replace;
12545 	  }
12546 	}
12547 
12548       return 1;
12549     }
12550   /* If this is a memory reference, make sure that there were no stores after
12551      it that might have clobbered the value.  We don't have alias info, so we
12552      assume any store invalidates it.  Moreover, we only have local UIDs, so
12553      we also assume that there were stores in the intervening basic blocks.  */
12554   else if (MEM_P (x) && !MEM_READONLY_P (x)
12555 	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
12556     {
12557       if (replace)
12558 	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
12559       return replace;
12560     }
12561 
12562   for (i = 0; i < len; i++)
12563     {
12564       if (fmt[i] == 'e')
12565 	{
12566 	  /* Check for identical subexpressions.  If x contains
12567 	     identical subexpressions we only have to traverse one of
12568 	     them.  */
12569 	  if (i == 1 && ARITHMETIC_P (x))
12570 	    {
12571 	      /* Note that at this point x0 has already been checked
12572 		 and found valid.  */
12573 	      rtx x0 = XEXP (x, 0);
12574 	      rtx x1 = XEXP (x, 1);
12575 
12576 	      /* If x0 and x1 are identical then x is also valid.  */
12577 	      if (x0 == x1)
12578 		return 1;
12579 
12580 	      /* If x1 is identical to a subexpression of x0 then
12581 		 while checking x0, x1 has already been checked.  Thus
12582 		 it is valid and so is x.  */
12583 	      if (ARITHMETIC_P (x0)
12584 		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12585 		return 1;
12586 
12587 	      /* If x0 is identical to a subexpression of x1 then x is
12588 		 valid iff the rest of x1 is valid.  */
12589 	      if (ARITHMETIC_P (x1)
12590 		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12591 		return
12592 		  get_last_value_validate (&XEXP (x1,
12593 						  x0 == XEXP (x1, 0) ? 1 : 0),
12594 					   insn, tick, replace);
12595 	    }
12596 
12597 	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
12598 				       replace) == 0)
12599 	    return 0;
12600 	}
12601       else if (fmt[i] == 'E')
12602 	for (j = 0; j < XVECLEN (x, i); j++)
12603 	  if (get_last_value_validate (&XVECEXP (x, i, j),
12604 				       insn, tick, replace) == 0)
12605 	    return 0;
12606     }
12607 
12608   /* If we haven't found a reason for it to be invalid, it is valid.  */
12609   return 1;
12610 }
12611 
12612 /* Get the last value assigned to X, if known.  Some registers
12613    in the value may be replaced with (clobber (const_int 0)) if their value
12614    is no longer known reliably.  */
12615 
12616 static rtx
12617 get_last_value (const_rtx x)
12618 {
12619   unsigned int regno;
12620   rtx value;
12621   reg_stat_type *rsp;
12622 
12623   /* If this is a non-paradoxical SUBREG, get the value of its operand and
12624      then convert it to the desired mode.  If this is a paradoxical SUBREG,
12625      we cannot predict what values the "extra" bits might have.  */
12626   if (GET_CODE (x) == SUBREG
12627       && subreg_lowpart_p (x)
12628       && !paradoxical_subreg_p (x)
12629       && (value = get_last_value (SUBREG_REG (x))) != 0)
12630     return gen_lowpart (GET_MODE (x), value);
12631 
12632   if (!REG_P (x))
12633     return 0;
12634 
12635   regno = REGNO (x);
12636   rsp = &reg_stat[regno];
12637   value = rsp->last_set_value;
12638 
12639   /* If we don't have a value, or if it isn't for this basic block and
12640      it's either a hard register, set more than once, or live
12641      at the beginning of the function, return 0.
12642 
12643      Because if it's not live at the beginning of the function then the reg
12644      is always set before being used (is never used without being set).
12645      And, if it's set only once, and it's always set before use, then all
12646      uses must have the same last value, even if it's not from this basic
12647      block.  */
12648 
12649   if (value == 0
12650       || (rsp->last_set_label < label_tick_ebb_start
12651 	  && (regno < FIRST_PSEUDO_REGISTER
12652 	      || REG_N_SETS (regno) != 1
12653 	      || REGNO_REG_SET_P
12654 		 (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), regno))))
12655     return 0;
12656 
12657   /* If the value was set in a later insn than the ones we are processing,
12658      we can't use it even if the register was only set once.  */
12659   if (rsp->last_set_label == label_tick
12660       && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
12661     return 0;
12662 
12663   /* If the value has all its registers valid, return it.  */
12664   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
12665     return value;
12666 
12667   /* Otherwise, make a copy and replace any invalid register with
12668      (clobber (const_int 0)).  If that fails for some reason, return 0.  */
12669 
12670   value = copy_rtx (value);
12671   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
12672     return value;
12673 
12674   return 0;
12675 }
12676 
12677 /* Return nonzero if expression X refers to a REG or to memory
12678    that is set in an instruction more recent than FROM_LUID.  */
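/* For example, if some register mentioned in X was set by an insn with a
   LUID greater than FROM_LUID in this basic block, moving X's use across
   that set would change the value X computes, so we return 1.  */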
12679 
12680 static int
12681 use_crosses_set_p (const_rtx x, int from_luid)
12682 {
12683   const char *fmt;
12684   int i;
12685   enum rtx_code code = GET_CODE (x);
12686 
12687   if (code == REG)
12688     {
12689       unsigned int regno = REGNO (x);
12690       unsigned endreg = END_REGNO (x);
12691 
12692 #ifdef PUSH_ROUNDING
12693       /* Don't allow uses of the stack pointer to be moved,
12694 	 because we don't know whether the move crosses a push insn.  */
12695       if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
12696 	return 1;
12697 #endif
12698       for (; regno < endreg; regno++)
12699 	{
12700 	  reg_stat_type *rsp = &reg_stat[regno];
12701 	  if (rsp->last_set
12702 	      && rsp->last_set_label == label_tick
12703 	      && DF_INSN_LUID (rsp->last_set) > from_luid)
12704 	    return 1;
12705 	}
12706       return 0;
12707     }
12708 
12709   if (code == MEM && mem_last_set > from_luid)
12710     return 1;
12711 
12712   fmt = GET_RTX_FORMAT (code);
12713 
12714   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12715     {
12716       if (fmt[i] == 'E')
12717 	{
12718 	  int j;
12719 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12720 	    if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
12721 	      return 1;
12722 	}
12723       else if (fmt[i] == 'e'
12724 	       && use_crosses_set_p (XEXP (x, i), from_luid))
12725 	return 1;
12726     }
12727   return 0;
12728 }
12729 
12730 /* Define three variables used for communication between the following
12731    routines.  */
12732 
12733 static unsigned int reg_dead_regno, reg_dead_endregno;
12734 static int reg_dead_flag;
12735 
12736 /* Function called via note_stores from reg_dead_at_p.
12737 
12738    If DEST is within [reg_dead_regno, reg_dead_endregno), set
12739    reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
12740 
12741 static void
12742 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
12743 {
12744   unsigned int regno, endregno;
12745 
12746   if (!REG_P (dest))
12747     return;
12748 
12749   regno = REGNO (dest);
12750   endregno = END_REGNO (dest);
12751   if (reg_dead_endregno > regno && reg_dead_regno < endregno)
12752     reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
12753 }
12754 
12755 /* Return nonzero if REG is known to be dead at INSN.
12756 
12757    We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
12758    referencing REG, it is dead.  If we hit a SET referencing REG, it is
12759    live.  Otherwise, see if it is live or dead at the start of the basic
12760    block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
12761    must be assumed to be always live.  */
12762 
12763 static int
12764 reg_dead_at_p (rtx reg, rtx insn)
12765 {
12766   basic_block block;
12767   unsigned int i;
12768 
12769   /* Set variables for reg_dead_at_p_1.  */
12770   reg_dead_regno = REGNO (reg);
12771   reg_dead_endregno = END_REGNO (reg);
12772 
12773   reg_dead_flag = 0;
12774 
12775   /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
12776      we allow the machine description to decide whether use-and-clobber
12777      patterns are OK.  */
12778   if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
12779     {
12780       for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12781 	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
12782 	  return 0;
12783     }
12784 
12785   /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
12786      beginning of basic block.  */
12787   block = BLOCK_FOR_INSN (insn);
12788   for (;;)
12789     {
12790       if (INSN_P (insn))
12791         {
12792 	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
12793 	  if (reg_dead_flag)
12794 	    return reg_dead_flag == 1 ? 1 : 0;
12795 
12796 	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
12797 	    return 1;
12798         }
12799 
12800       if (insn == BB_HEAD (block))
12801 	break;
12802 
12803       insn = PREV_INSN (insn);
12804     }
12805 
12806   /* Look at live-in sets for the basic block that we were in.  */
12807   for (i = reg_dead_regno; i < reg_dead_endregno; i++)
12808     if (REGNO_REG_SET_P (df_get_live_in (block), i))
12809       return 0;
12810 
12811   return 1;
12812 }
12813 
12814 /* Note hard registers in X that are used.  */
12815 
12816 static void
12817 mark_used_regs_combine (rtx x)
12818 {
12819   RTX_CODE code = GET_CODE (x);
12820   unsigned int regno;
12821   int i;
12822 
12823   switch (code)
12824     {
12825     case LABEL_REF:
12826     case SYMBOL_REF:
12827     case CONST:
12828     CASE_CONST_ANY:
12829     case PC:
12830     case ADDR_VEC:
12831     case ADDR_DIFF_VEC:
12832     case ASM_INPUT:
12833 #ifdef HAVE_cc0
12834     /* CC0 must die in the insn after it is set, so we don't need to take
12835        special note of it here.  */
12836     case CC0:
12837 #endif
12838       return;
12839 
12840     case CLOBBER:
12841       /* If we are clobbering a MEM, mark any hard registers inside the
12842 	 address as used.  */
12843       if (MEM_P (XEXP (x, 0)))
12844 	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
12845       return;
12846 
12847     case REG:
12848       regno = REGNO (x);
12849       /* A hard reg in a wide mode may really be multiple registers.
12850 	 If so, mark all of them just like the first.  */
12851       if (regno < FIRST_PSEUDO_REGISTER)
12852 	{
12853 	  /* None of this applies to the stack, frame or arg pointers.  */
12854 	  if (regno == STACK_POINTER_REGNUM
12855 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
12856 	      || regno == HARD_FRAME_POINTER_REGNUM
12857 #endif
12858 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
12859 	      || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
12860 #endif
12861 	      || regno == FRAME_POINTER_REGNUM)
12862 	    return;
12863 
12864 	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
12865 	}
12866       return;
12867 
12868     case SET:
12869       {
12870 	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
12871 	   the address.  */
12872 	rtx testreg = SET_DEST (x);
12873 
12874 	while (GET_CODE (testreg) == SUBREG
12875 	       || GET_CODE (testreg) == ZERO_EXTRACT
12876 	       || GET_CODE (testreg) == STRICT_LOW_PART)
12877 	  testreg = XEXP (testreg, 0);
12878 
12879 	if (MEM_P (testreg))
12880 	  mark_used_regs_combine (XEXP (testreg, 0));
12881 
12882 	mark_used_regs_combine (SET_SRC (x));
12883       }
12884       return;
12885 
12886     default:
12887       break;
12888     }
12889 
12890   /* Recursively scan the operands of this expression.  */
12891 
12892   {
12893     const char *fmt = GET_RTX_FORMAT (code);
12894 
12895     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12896       {
12897 	if (fmt[i] == 'e')
12898 	  mark_used_regs_combine (XEXP (x, i));
12899 	else if (fmt[i] == 'E')
12900 	  {
12901 	    int j;
12902 
12903 	    for (j = 0; j < XVECLEN (x, i); j++)
12904 	      mark_used_regs_combine (XVECEXP (x, i, j));
12905 	  }
12906       }
12907   }
12908 }
12909 
12910 /* Remove register number REGNO from the dead registers list of INSN.
12911 
12912    Return the note used to record the death, if there was one.  */
12913 
12914 rtx
12915 remove_death (unsigned int regno, rtx insn)
12916 {
12917   rtx note = find_regno_note (insn, REG_DEAD, regno);
12918 
12919   if (note)
12920     remove_note (insn, note);
12921 
12922   return note;
12923 }
12924 
12925 /* For each register (hardware or pseudo) used within expression X, if its
12926    death is in an instruction with luid between FROM_LUID (inclusive) and
12927    TO_INSN (exclusive), put a REG_DEAD note for that register in the
12928    list headed by PNOTES.
12929 
12930    That said, don't move death notes for registers killed by maybe_kill_insn.
12931 
12932    This is done when X is being merged by combination into TO_INSN.  These
12933    notes will then be distributed as needed.  */
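/* For example, if (reg A) last dies at an insn whose luid lies between
   FROM_LUID and TO_INSN, the REG_DEAD note is removed from that insn and
   queued on PNOTES for distribute_notes to place later.  */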
12934 
12935 static void
12936 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx to_insn,
12937 	     rtx *pnotes)
12938 {
12939   const char *fmt;
12940   int len, i;
12941   enum rtx_code code = GET_CODE (x);
12942 
12943   if (code == REG)
12944     {
12945       unsigned int regno = REGNO (x);
12946       rtx where_dead = reg_stat[regno].last_death;
12947 
12948       /* Don't move the register if it gets killed in between from and to.  */
12949       if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
12950 	  && ! reg_referenced_p (x, maybe_kill_insn))
12951 	return;
12952 
12953       if (where_dead
12954 	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
12955 	  && DF_INSN_LUID (where_dead) >= from_luid
12956 	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
12957 	{
12958 	  rtx note = remove_death (regno, where_dead);
12959 
12960 	  /* It is possible for the call above to return 0.  This can occur
12961 	     when last_death points to I2 or I1 that we combined with.
12962 	     In that case make a new note.
12963 
12964 	     We must also check for the case where X is a hard register
12965 	     and NOTE is a death note for a range of hard registers
12966 	     including X.  In that case, we must put REG_DEAD notes for
12967 	     the remaining registers in place of NOTE.  */
12968 
12969 	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
12970 	      && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12971 		  > GET_MODE_SIZE (GET_MODE (x))))
12972 	    {
12973 	      unsigned int deadregno = REGNO (XEXP (note, 0));
12974 	      unsigned int deadend = END_HARD_REGNO (XEXP (note, 0));
12975 	      unsigned int ourend = END_HARD_REGNO (x);
12976 	      unsigned int i;
12977 
12978 	      for (i = deadregno; i < deadend; i++)
12979 		if (i < regno || i >= ourend)
12980 		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
12981 	    }
12982 
12983 	  /* If we didn't find any note, or if we found a REG_DEAD note that
12984 	     covers only part of the given reg, and we have a multi-reg hard
12985 	     register, then to be safe we must check for REG_DEAD notes
12986 	     for each register other than the first.  They could have
12987 	     their own REG_DEAD notes lying around.  */
12988 	  else if ((note == 0
12989 		    || (note != 0
12990 			&& (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
12991 			    < GET_MODE_SIZE (GET_MODE (x)))))
12992 		   && regno < FIRST_PSEUDO_REGISTER
12993 		   && hard_regno_nregs[regno][GET_MODE (x)] > 1)
12994 	    {
12995 	      unsigned int ourend = END_HARD_REGNO (x);
12996 	      unsigned int i, offset;
12997 	      rtx oldnotes = 0;
12998 
12999 	      if (note)
13000 		offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
13001 	      else
13002 		offset = 1;
13003 
13004 	      for (i = regno + offset; i < ourend; i++)
13005 		move_deaths (regno_reg_rtx[i],
13006 			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
13007 	    }
13008 
13009 	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13010 	    {
13011 	      XEXP (note, 1) = *pnotes;
13012 	      *pnotes = note;
13013 	    }
13014 	  else
13015 	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13016 	}
13017 
13018       return;
13019     }
13020 
13021   else if (GET_CODE (x) == SET)
13022     {
13023       rtx dest = SET_DEST (x);
13024 
13025       move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13026 
13027       /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13028 	 that accesses one word of a multi-word item, some
13029 	 piece of every register in the expression is used by
13030 	 this insn, so remove any old death.  */
13031       /* ??? So why do we test for equality of the sizes?  */
13032 
13033       if (GET_CODE (dest) == ZERO_EXTRACT
13034 	  || GET_CODE (dest) == STRICT_LOW_PART
13035 	  || (GET_CODE (dest) == SUBREG
13036 	      && (((GET_MODE_SIZE (GET_MODE (dest))
13037 		    + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13038 		  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13039 		       + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13040 	{
13041 	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13042 	  return;
13043 	}
13044 
13045       /* If this is some other SUBREG, we know it replaces the entire
13046 	 value, so use that as the destination.  */
13047       if (GET_CODE (dest) == SUBREG)
13048 	dest = SUBREG_REG (dest);
13049 
13050       /* If this is a MEM, adjust deaths of anything used in the address.
13051 	 For a REG (the only other possibility), the entire value is
13052 	 being replaced so the old value is not used in this insn.  */
13053 
13054       if (MEM_P (dest))
13055 	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13056 		     to_insn, pnotes);
13057       return;
13058     }
13059 
13060   else if (GET_CODE (x) == CLOBBER)
13061     return;
13062 
13063   len = GET_RTX_LENGTH (code);
13064   fmt = GET_RTX_FORMAT (code);
13065 
13066   for (i = 0; i < len; i++)
13067     {
13068       if (fmt[i] == 'E')
13069 	{
13070 	  int j;
13071 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13072 	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13073 			 to_insn, pnotes);
13074 	}
13075       else if (fmt[i] == 'e')
13076 	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13077     }
13078 }
13079 
13080 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13081    pattern of an insn.  X must be a REG.  */
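/* For example, X is such a target in
   (set (zero_extract:SI (reg X) (const_int 8) (const_int 0)) (reg Y)),
   and likewise when the destination is a STRICT_LOW_PART of a SUBREG
   of X.  */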
13082 
13083 static int
13084 reg_bitfield_target_p (rtx x, rtx body)
13085 {
13086   int i;
13087 
13088   if (GET_CODE (body) == SET)
13089     {
13090       rtx dest = SET_DEST (body);
13091       rtx target;
13092       unsigned int regno, tregno, endregno, endtregno;
13093 
13094       if (GET_CODE (dest) == ZERO_EXTRACT)
13095 	target = XEXP (dest, 0);
13096       else if (GET_CODE (dest) == STRICT_LOW_PART)
13097 	target = SUBREG_REG (XEXP (dest, 0));
13098       else
13099 	return 0;
13100 
13101       if (GET_CODE (target) == SUBREG)
13102 	target = SUBREG_REG (target);
13103 
13104       if (!REG_P (target))
13105 	return 0;
13106 
13107       tregno = REGNO (target), regno = REGNO (x);
13108       if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13109 	return target == x;
13110 
13111       endtregno = end_hard_regno (GET_MODE (target), tregno);
13112       endregno = end_hard_regno (GET_MODE (x), regno);
13113 
13114       return endregno > tregno && regno < endtregno;
13115     }
13116 
13117   else if (GET_CODE (body) == PARALLEL)
13118     for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13119       if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13120 	return 1;
13121 
13122   return 0;
13123 }
13124 
13125 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13126    as appropriate.  I3 and I2 are the insns resulting from the combination
13127    insns including FROM (I2 may be zero).
13128 
13129    ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13130    not need REG_DEAD notes because they are being substituted for.  This
13131    saves searching in the most common cases.
13132 
13133    Each note in the list is either ignored or placed on some insns, depending
13134    on the type of note.  */
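/* For example, a REG_DEAD note for a register still used as an input by
   I3 is simply placed on I3, whereas one for a register that vanished
   from the combined pattern entirely forces a search backwards for the
   previous use or set of that register (see the REG_DEAD case below).  */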
13135 
13136 static void
13137 distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
13138 		  rtx elim_i1, rtx elim_i0)
13139 {
13140   rtx note, next_note;
13141   rtx tem;
13142 
13143   for (note = notes; note; note = next_note)
13144     {
13145       rtx place = 0, place2 = 0;
13146 
13147       next_note = XEXP (note, 1);
13148       switch (REG_NOTE_KIND (note))
13149 	{
13150 	case REG_BR_PROB:
13151 	case REG_BR_PRED:
13152 	  /* Doesn't matter much where we put this, as long as it's somewhere.
13153 	     It is preferable to keep these notes on branches, which is most
13154 	     likely to be i3.  */
13155 	  place = i3;
13156 	  break;
13157 
13158 	case REG_NON_LOCAL_GOTO:
13159 	  if (JUMP_P (i3))
13160 	    place = i3;
13161 	  else
13162 	    {
13163 	      gcc_assert (i2 && JUMP_P (i2));
13164 	      place = i2;
13165 	    }
13166 	  break;
13167 
13168 	case REG_EH_REGION:
13169 	  /* These notes must remain with the call or trapping instruction.  */
13170 	  if (CALL_P (i3))
13171 	    place = i3;
13172 	  else if (i2 && CALL_P (i2))
13173 	    place = i2;
13174 	  else
13175 	    {
13176 	      gcc_assert (cfun->can_throw_non_call_exceptions);
13177 	      if (may_trap_p (i3))
13178 		place = i3;
13179 	      else if (i2 && may_trap_p (i2))
13180 		place = i2;
13181 	      /* ??? Otherwise assume we've combined things such that we
13182 		 can now prove that the instructions can't trap.  Drop the
13183 		 note in this case.  */
13184 	    }
13185 	  break;
13186 
13187 	case REG_ARGS_SIZE:
13188 	  /* ??? It is not clear how to distribute this among i3..i1.  Assume i3
13189 	     contains the entire adjustment.  Assert i3 has at least some adjustment.  */
13190 	  if (!noop_move_p (i3))
13191 	    {
13192 	      int old_size, args_size = INTVAL (XEXP (note, 0));
13193 	      /* fixup_args_size_notes looks at REG_NORETURN note,
13194 		 so ensure the note is placed there first.  */
13195 	      if (CALL_P (i3))
13196 		{
13197 		  rtx *np;
13198 		  for (np = &next_note; *np; np = &XEXP (*np, 1))
13199 		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
13200 		      {
13201 			rtx n = *np;
13202 			*np = XEXP (n, 1);
13203 			XEXP (n, 1) = REG_NOTES (i3);
13204 			REG_NOTES (i3) = n;
13205 			break;
13206 		      }
13207 		}
13208 	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
13209 	      /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
13210 		 note to all noreturn calls; allow that here.  */
13211 	      gcc_assert (old_size != args_size
13212 			  || (CALL_P (i3)
13213 			      && !ACCUMULATE_OUTGOING_ARGS
13214 			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
13215 	    }
13216 	  break;
13217 
13218 	case REG_NORETURN:
13219 	case REG_SETJMP:
13220 	case REG_TM:
13221 	  /* These notes must remain with the call.  It should not be
13222 	     possible for both I2 and I3 to be a call.  */
13223 	  if (CALL_P (i3))
13224 	    place = i3;
13225 	  else
13226 	    {
13227 	      gcc_assert (i2 && CALL_P (i2));
13228 	      place = i2;
13229 	    }
13230 	  break;
13231 
13232 	case REG_UNUSED:
13233 	  /* Any clobbers for i3 may still exist, and so we must process
13234 	     REG_UNUSED notes from that insn.
13235 
13236 	     Any clobbers from i2 or i1 can only exist if they were added by
13237 	     recog_for_combine.  In that case, recog_for_combine created the
13238 	     necessary REG_UNUSED notes.  Trying to keep any original
13239 	     REG_UNUSED notes from these insns can cause incorrect output
13240 	     if it is for the same register as the original i3 dest.
13241 	     In that case, we will notice that the register is set in i3,
13242 	     and then add a REG_UNUSED note for the destination of i3, which
13243 	     is wrong.  However, it is possible to have REG_UNUSED notes from
13244 	     i2 or i1 for registers which were both used and clobbered, so
13245 	     we keep notes from i2 or i1 if they will turn into REG_DEAD
13246 	     notes.  */
13247 
13248 	  /* If this register is set or clobbered in I3, put the note there
13249 	     unless there is one already.  */
13250 	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13251 	    {
13252 	      if (from_insn != i3)
13253 		break;
13254 
13255 	      if (! (REG_P (XEXP (note, 0))
13256 		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13257 		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13258 		place = i3;
13259 	    }
13260 	  /* Otherwise, if this register is used by I3, then this register
13261 	     now dies here, so we must put a REG_DEAD note here unless there
13262 	     is one already.  */
13263 	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13264 		   && ! (REG_P (XEXP (note, 0))
13265 			 ? find_regno_note (i3, REG_DEAD,
13266 					    REGNO (XEXP (note, 0)))
13267 			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13268 	    {
13269 	      PUT_REG_NOTE_KIND (note, REG_DEAD);
13270 	      place = i3;
13271 	    }
13272 	  break;
13273 
13274 	case REG_EQUAL:
13275 	case REG_EQUIV:
13276 	case REG_NOALIAS:
13277 	  /* These notes say something about results of an insn.  We can
13278 	     only support them if they used to be on I3 in which case they
13279 	     remain on I3.  Otherwise they are ignored.
13280 
13281 	     If the note refers to an expression that is not a constant, we
13282 	     must also ignore the note since we cannot tell whether the
13283 	     equivalence is still true.  It might be possible to do
13284 	     slightly better than this (we only have a problem if I2DEST
13285 	     or I1DEST is present in the expression), but it doesn't
13286 	     seem worth the trouble.  */
13287 
13288 	  if (from_insn == i3
13289 	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13290 	    place = i3;
13291 	  break;
13292 
13293 	case REG_INC:
13294 	  /* These notes say something about how a register is used.  They must
13295 	     be present on any use of the register in I2 or I3.  */
13296 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13297 	    place = i3;
13298 
13299 	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13300 	    {
13301 	      if (place)
13302 		place2 = i2;
13303 	      else
13304 		place = i2;
13305 	    }
13306 	  break;
13307 
13308 	case REG_LABEL_TARGET:
13309 	case REG_LABEL_OPERAND:
13310 	  /* This can show up in several ways -- either directly in the
13311 	     pattern, or hidden off in the constant pool with (or without?)
13312 	     a REG_EQUAL note.  */
13313 	  /* ??? Ignore the without-reg_equal-note problem for now.  */
13314 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13315 	      || ((tem = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13316 		  && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13317 		  && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0)))
13318 	    place = i3;
13319 
13320 	  if (i2
13321 	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13322 		  || ((tem = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13323 		      && GET_CODE (XEXP (tem, 0)) == LABEL_REF
13324 		      && XEXP (XEXP (tem, 0), 0) == XEXP (note, 0))))
13325 	    {
13326 	      if (place)
13327 		place2 = i2;
13328 	      else
13329 		place = i2;
13330 	    }
13331 
13332 	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13333 	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13334 	     there.  */
13335 	  if (place && JUMP_P (place)
13336 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13337 	      && (JUMP_LABEL (place) == NULL
13338 		  || JUMP_LABEL (place) == XEXP (note, 0)))
13339 	    {
13340 	      rtx label = JUMP_LABEL (place);
13341 
13342 	      if (!label)
13343 		JUMP_LABEL (place) = XEXP (note, 0);
13344 	      else if (LABEL_P (label))
13345 		LABEL_NUSES (label)--;
13346 	    }
13347 
13348 	  if (place2 && JUMP_P (place2)
13349 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13350 	      && (JUMP_LABEL (place2) == NULL
13351 		  || JUMP_LABEL (place2) == XEXP (note, 0)))
13352 	    {
13353 	      rtx label = JUMP_LABEL (place2);
13354 
13355 	      if (!label)
13356 		JUMP_LABEL (place2) = XEXP (note, 0);
13357 	      else if (LABEL_P (label))
13358 		LABEL_NUSES (label)--;
13359 	      place2 = 0;
13360 	    }
13361 	  break;
13362 
13363 	case REG_NONNEG:
13364 	  /* This note says something about the value of a register prior
13365 	     to the execution of an insn.  It is too much trouble to see
13366 	     if the note is still correct in all situations.  It is better
13367 	     to simply delete it.  */
13368 	  break;
13369 
13370 	case REG_DEAD:
13371 	  /* If we replaced the right hand side of FROM_INSN with a
13372 	     REG_EQUAL note, the original use of the dying register
13373 	     will not have been combined into I3 and I2.  In such cases,
13374 	     FROM_INSN is guaranteed to be the first of the combined
13375 	     instructions, so we simply need to search back before
13376 	     FROM_INSN for the previous use or set of this register,
13377 	     then alter the notes there appropriately.
13378 
13379 	     If the register is used as an input in I3, it dies there.
13380 	     Similarly for I2, if it is nonzero and adjacent to I3.
13381 
13382 	     If the register is not used as an input in either I3 or I2
13383 	     and it is not one of the registers we were supposed to eliminate,
13384 	     there are two possibilities.  We might have a non-adjacent I2
13385 	     or we might have somehow eliminated an additional register
13386 	     from a computation.  For example, we might have had A & B where
13387 	     we discover that B will always be zero.  In this case we will
13388 	     eliminate the reference to A.
13389 
13390 	     In both cases, we must search to see if we can find a previous
13391 	     use of A and put the death note there.  */
13392 
13393 	  if (from_insn
13394 	      && from_insn == i2mod
13395 	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13396 	    tem = from_insn;
13397 	  else
13398 	    {
13399 	      if (from_insn
13400 		  && CALL_P (from_insn)
13401 		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13402 		place = from_insn;
13403 	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13404 		place = i3;
13405 	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13406 		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13407 		place = i2;
13408 	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13409 			&& !(i2mod
13410 			     && reg_overlap_mentioned_p (XEXP (note, 0),
13411 							 i2mod_old_rhs)))
13412 		       || rtx_equal_p (XEXP (note, 0), elim_i1)
13413 		       || rtx_equal_p (XEXP (note, 0), elim_i0))
13414 		break;
13415 	      tem = i3;
13416 	    }
13417 
13418 	  if (place == 0)
13419 	    {
13420 	      basic_block bb = this_basic_block;
13421 
13422 	      for (tem = PREV_INSN (tem); place == 0; tem = PREV_INSN (tem))
13423 		{
13424 		  if (!NONDEBUG_INSN_P (tem))
13425 		    {
13426 		      if (tem == BB_HEAD (bb))
13427 			break;
13428 		      continue;
13429 		    }
13430 
13431 		  /* If the register is being set at TEM, see if that is all
13432 		     TEM is doing.  If so, delete TEM.  Otherwise, make this
13433 		     into a REG_UNUSED note instead.  Don't delete sets to
13434 		     global register vars.  */
13435 		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13436 		       || !global_regs[REGNO (XEXP (note, 0))])
13437 		      && reg_set_p (XEXP (note, 0), PATTERN (tem)))
13438 		    {
13439 		      rtx set = single_set (tem);
13440 		      rtx inner_dest = 0;
13441 #ifdef HAVE_cc0
13442 		      rtx cc0_setter = NULL_RTX;
13443 #endif
13444 
13445 		      if (set != 0)
13446 			for (inner_dest = SET_DEST (set);
13447 			     (GET_CODE (inner_dest) == STRICT_LOW_PART
13448 			      || GET_CODE (inner_dest) == SUBREG
13449 			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
13450 			     inner_dest = XEXP (inner_dest, 0))
13451 			  ;
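		      /* Illustrative sketch (hypothetical RTL): for a dest of
			 (strict_low_part (subreg:HI (reg:SI 100) 0)), the
			 loop above leaves inner_dest as (reg:SI 100).  */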
13452 
13453 		      /* Verify that it was the set, and not a clobber that
13454 			 modified the register.
13455 
13456 			 CC0 targets must be careful to maintain setter/user
13457 			 pairs.  If we cannot delete the setter due to side
13458 			 effects, mark the user with an UNUSED note instead
13459 			 of deleting it.  */
13460 
13461 		      if (set != 0 && ! side_effects_p (SET_SRC (set))
13462 			  && rtx_equal_p (XEXP (note, 0), inner_dest)
13463 #ifdef HAVE_cc0
13464 			  && (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13465 			      || ((cc0_setter = prev_cc0_setter (tem)) != NULL
13466 				  && sets_cc0_p (PATTERN (cc0_setter)) > 0))
13467 #endif
13468 			  )
13469 			{
13470 			  /* Move the notes and links of TEM elsewhere.
13471 			     This might delete other dead insns recursively.
13472 			     First set the pattern to something that won't use
13473 			     any register.  */
13474 			  rtx old_notes = REG_NOTES (tem);
13475 
13476 			  PATTERN (tem) = pc_rtx;
13477 			  REG_NOTES (tem) = NULL;
13478 
13479 			  distribute_notes (old_notes, tem, tem, NULL_RTX,
13480 					    NULL_RTX, NULL_RTX, NULL_RTX);
13481 			  distribute_links (LOG_LINKS (tem));
13482 
13483 			  SET_INSN_DELETED (tem);
13484 			  if (tem == i2)
13485 			    i2 = NULL_RTX;
13486 
13487 #ifdef HAVE_cc0
13488 			  /* Delete the setter too.  */
13489 			  if (cc0_setter)
13490 			    {
13491 			      PATTERN (cc0_setter) = pc_rtx;
13492 			      old_notes = REG_NOTES (cc0_setter);
13493 			      REG_NOTES (cc0_setter) = NULL;
13494 
13495 			      distribute_notes (old_notes, cc0_setter,
13496 						cc0_setter, NULL_RTX,
13497 						NULL_RTX, NULL_RTX, NULL_RTX);
13498 			      distribute_links (LOG_LINKS (cc0_setter));
13499 
13500 			      SET_INSN_DELETED (cc0_setter);
13501 			      if (cc0_setter == i2)
13502 				i2 = NULL_RTX;
13503 			    }
13504 #endif
13505 			}
13506 		      else
13507 			{
13508 			  PUT_REG_NOTE_KIND (note, REG_UNUSED);
13509 
13510 			  /* If there isn't already a REG_UNUSED note, put one
13511 			     here.  Do not place a REG_DEAD note, even if
13512 			     the register is also used here; that would not
13513 			     match the algorithm used in lifetime analysis
13514 			     and can cause the consistency check in the
13515 			     scheduler to fail.  */
13516 			  if (! find_regno_note (tem, REG_UNUSED,
13517 						 REGNO (XEXP (note, 0))))
13518 			    place = tem;
13519 			  break;
13520 			}
13521 		    }
13522 		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem))
13523 			   || (CALL_P (tem)
13524 			       && find_reg_fusage (tem, USE, XEXP (note, 0))))
13525 		    {
13526 		      place = tem;
13527 
13528 		      /* If we are doing a 3->2 combination, and we have a
13529 			 register that formerly died in i3 and was not used
13530 			 by i2, but that now no longer dies in i3, is used in
13531 			 i2 and does not die there, and PLACE is between i2
13532 			 and i3, then we may need to move a link from PLACE to
13533 			 i2.  */
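		      /* Sketch of the layout (hypothetical LUID order):
			   i2 < PLACE < i3, with the register used in i2.
			 Redistributing PLACE's LOG_LINKS lets the link for
			 that register migrate back to i2.  */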
13534 		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13535 			  && from_insn
13536 			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13537 			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13538 			{
13539 			  struct insn_link *links = LOG_LINKS (place);
13540 			  LOG_LINKS (place) = NULL;
13541 			  distribute_links (links);
13542 			}
13543 		      break;
13544 		    }
13545 
13546 		  if (tem == BB_HEAD (bb))
13547 		    break;
13548 		}
13549 
13550 	    }
13551 
13552 	  /* If the register is set or already dead at PLACE, we needn't do
13553 	     anything with this note if it is still a REG_DEAD note.
13554 	     We check here if it is set at all, not if it is totally replaced,
13555 	     which is what `dead_or_set_p' checks, so also check for it being
13556 	     set partially.  */
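	  /* A partial set that dead_or_set_p alone would miss (hypothetical
	     RTL sketch):
	       (set (zero_extract (reg 100) (const_int 8) (const_int 0)) ...)
	     such bit-field destinations are caught by reg_bitfield_target_p
	     below.  */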
13557 
13558 	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
13559 	    {
13560 	      unsigned int regno = REGNO (XEXP (note, 0));
13561 	      reg_stat_type *rsp = &reg_stat[regno];
13562 
13563 	      if (dead_or_set_p (place, XEXP (note, 0))
13564 		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
13565 		{
13566 		  /* Unless the register previously died in PLACE, clear
13567 		     last_death.  [I no longer understand why this is
13568 		     being done.] */
13569 		  if (rsp->last_death != place)
13570 		    rsp->last_death = 0;
13571 		  place = 0;
13572 		}
13573 	      else
13574 		rsp->last_death = place;
13575 
13576 	      /* If this is a death note for a hard reg that is occupying
13577 		 multiple registers, ensure that we are still using all
13578 		 parts of the object.  If we find a piece of the object
13579 		 that is unused, we must arrange for an appropriate REG_DEAD
13580 		 note to be added for it.  However, we can't just emit a USE
13581 		 and attach the note to it, since the register might actually
13582 		 be dead; so we recurse, and the recursive call then finds
13583 		 the previous insn that used this register.  */
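	      /* Example sketch (hypothetical hard registers): a REG_DEAD
		 note for (reg:DI 2) where DImode spans hard regs 2 and 3.
		 If PLACE uses only hard reg 2, a fresh REG_DEAD note for
		 the unused piece (reg:SI 3) is allocated and redistributed
		 through the recursive distribute_notes call below.  */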
13584 
13585 	      if (place && regno < FIRST_PSEUDO_REGISTER
13586 		  && hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))] > 1)
13587 		{
13588 		  unsigned int endregno = END_HARD_REGNO (XEXP (note, 0));
13589 		  int all_used = 1;
13590 		  unsigned int i;
13591 
13592 		  for (i = regno; i < endregno; i++)
13593 		    if ((! refers_to_regno_p (i, i + 1, PATTERN (place), 0)
13594 			 && ! find_regno_fusage (place, USE, i))
13595 			|| dead_or_set_regno_p (place, i))
13596 		      all_used = 0;
13597 
13598 		  if (! all_used)
13599 		    {
13600 		      /* Put only REG_DEAD notes for pieces that are
13601 			 not already dead or set.  */
13602 
13603 		      for (i = regno; i < endregno;
13604 			   i += hard_regno_nregs[i][reg_raw_mode[i]])
13605 			{
13606 			  rtx piece = regno_reg_rtx[i];
13607 			  basic_block bb = this_basic_block;
13608 
13609 			  if (! dead_or_set_p (place, piece)
13610 			      && ! reg_bitfield_target_p (piece,
13611 							  PATTERN (place)))
13612 			    {
13613 			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
13614 							     NULL_RTX);
13615 
13616 			      distribute_notes (new_note, place, place,
13617 						NULL_RTX, NULL_RTX, NULL_RTX,
13618 						NULL_RTX);
13619 			    }
13620 			  else if (! refers_to_regno_p (i, i + 1,
13621 							PATTERN (place), 0)
13622 				   && ! find_regno_fusage (place, USE, i))
13623 			    for (tem = PREV_INSN (place); ;
13624 				 tem = PREV_INSN (tem))
13625 			      {
13626 				if (!NONDEBUG_INSN_P (tem))
13627 				  {
13628 				    if (tem == BB_HEAD (bb))
13629 				      break;
13630 				    continue;
13631 				  }
13632 				if (dead_or_set_p (tem, piece)
13633 				    || reg_bitfield_target_p (piece,
13634 							      PATTERN (tem)))
13635 				  {
13636 				    add_reg_note (tem, REG_UNUSED, piece);
13637 				    break;
13638 				  }
13639 			      }
13640 
13641 			}
13642 
13643 		      place = 0;
13644 		    }
13645 		}
13646 	    }
13647 	  break;
13648 
13649 	default:
13650 	  /* Any other notes should not be present at this point in the
13651 	     compilation.  */
13652 	  gcc_unreachable ();
13653 	}
13654 
13655       if (place)
13656 	{
13657 	  XEXP (note, 1) = REG_NOTES (place);
13658 	  REG_NOTES (place) = note;
13659 	}
13660 
13661       if (place2)
13662 	add_reg_note (place2, REG_NOTE_KIND (note), XEXP (note, 0));
13663     }
13664 }
13665 
13666 /* Similarly to above, distribute the LOG_LINKS that used to be present on
13667    I3, I2, and I1 to new locations.  This is also called to add a link
13668    pointing at I3 when I3's destination is changed.  */
13669 
13670 static void
13671 distribute_links (struct insn_link *links)
13672 {
13673   struct insn_link *link, *next_link;
13674 
13675   for (link = links; link; link = next_link)
13676     {
13677       rtx place = 0;
13678       rtx insn;
13679       rtx set, reg;
13680 
13681       next_link = link->next;
13682 
13683       /* If the insn that this link points to is a NOTE or isn't a single
13684 	 set, ignore it.  In the latter case, it isn't clear what we
13685 	 can do other than ignore the link, since we can't tell which
13686 	 register it was for.  Such links wouldn't be used by combine
13687 	 anyway.
13688 
13689 	 It is not possible for the destination of the target of the link to
13690 	 have been changed by combine.  The only way this could happen is if
13691 	 we replace I3, I2, and I1 by I3 and I2.  But in that case the
13692 	 destination of I2 also remains unchanged.  */
13693 
13694       if (NOTE_P (link->insn)
13695 	  || (set = single_set (link->insn)) == 0)
13696 	continue;
13697 
13698       reg = SET_DEST (set);
13699       while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
13700 	     || GET_CODE (reg) == STRICT_LOW_PART)
13701 	reg = XEXP (reg, 0);
13702 
13703       /* A LOG_LINK is defined as being placed on the first insn that uses
13704 	 a register and points to the insn that sets the register.  Start
13705 	 searching at the next insn after the target of the link and stop
13706 	 when we reach a set of the register or the end of the basic block.
13707 
13708 	 Note that this correctly handles the link that used to point from
13709 	 I3 to I2.  Also note that not much searching is typically done here
13710 	 since most links don't point very far away.  */
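      /* Sketch of the search (hypothetical insn stream):
	   link->insn:   (set (reg 100) (...))
	   next insn:    ... uses (reg 100) ...   <- the link lands here
	 The walk stops at the first use or set of REG, or at the end of
	 the basic block.  */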
13711 
13712       for (insn = NEXT_INSN (link->insn);
13713 	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
13714 		     || BB_HEAD (this_basic_block->next_bb) != insn));
13715 	   insn = NEXT_INSN (insn))
13716 	if (DEBUG_INSN_P (insn))
13717 	  continue;
13718 	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
13719 	  {
13720 	    if (reg_referenced_p (reg, PATTERN (insn)))
13721 	      place = insn;
13722 	    break;
13723 	  }
13724 	else if (CALL_P (insn)
13725 		 && find_reg_fusage (insn, USE, reg))
13726 	  {
13727 	    place = insn;
13728 	    break;
13729 	  }
13730 	else if (INSN_P (insn) && reg_set_p (reg, insn))
13731 	  break;
13732 
13733       /* If we found a place to put the link, place it there unless there
13734 	 is already a link to the same insn as LINK at that point.  */
13735 
13736       if (place)
13737 	{
13738 	  struct insn_link *link2;
13739 
13740 	  FOR_EACH_LOG_LINK (link2, place)
13741 	    if (link2->insn == link->insn)
13742 	      break;
13743 
13744 	  if (link2 == NULL)
13745 	    {
13746 	      link->next = LOG_LINKS (place);
13747 	      LOG_LINKS (place) = link;
13748 
13749 	      /* Set added_links_insn to the earliest insn we added a
13750 		 link to.  */
13751 	      if (added_links_insn == 0
13752 		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
13753 		added_links_insn = place;
13754 	    }
13755 	}
13756     }
13757 }
13758 
13759 /* Subroutine of unmentioned_reg_p and callback from for_each_rtx.
13760    Check whether the expression pointed to by LOC is a register or
13761    memory, and if so return 1 if it isn't mentioned in the rtx EXPR.
13762    Otherwise return zero.  */
13763 
13764 static int
13765 unmentioned_reg_p_1 (rtx *loc, void *expr)
13766 {
13767   rtx x = *loc;
13768 
13769   if (x != NULL_RTX
13770       && (REG_P (x) || MEM_P (x))
13771       && ! reg_mentioned_p (x, (rtx) expr))
13772     return 1;
13773   return 0;
13774 }
13775 
13776 /* Check for any register or memory mentioned in EQUIV that is not
13777    mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
13778    of EXPR where some registers may have been replaced by constants.  */
13779 
13780 static bool
13781 unmentioned_reg_p (rtx equiv, rtx expr)
13782 {
13783   return for_each_rtx (&equiv, unmentioned_reg_p_1, expr);
13784 }
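
/* Usage sketch (hypothetical RTL): given EXPR = (plus (reg 1) (reg 2)),
   an EQUIV of (plus (reg 1) (const_int 4)) yields false, since every
   register in EQUIV is also mentioned in EXPR, whereas an EQUIV that
   mentions (reg 3) would yield true.  */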
13785 
13786 DEBUG_FUNCTION void
13787 dump_combine_stats (FILE *file)
13788 {
13789   fprintf
13790     (file,
13791      ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
13792      combine_attempts, combine_merges, combine_extras, combine_successes);
13793 }
13794 
13795 void
13796 dump_combine_total_stats (FILE *file)
13797 {
13798   fprintf
13799     (file,
13800      "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
13801      total_attempts, total_merges, total_extras, total_successes);
13802 }
13803 
13804 static bool
13805 gate_handle_combine (void)
13806 {
13807   return (optimize > 0);
13808 }
13809 
13810 /* Try combining insns through substitution.  */
13811 static unsigned int
13812 rest_of_handle_combine (void)
13813 {
13814   int rebuild_jump_labels_after_combine;
13815 
13816   df_set_flags (DF_LR_RUN_DCE | DF_DEFER_INSN_RESCAN);
13817   df_note_add_problem ();
13818   df_analyze ();
13819 
13820   regstat_init_n_sets_and_refs ();
13821 
13822   rebuild_jump_labels_after_combine
13823     = combine_instructions (get_insns (), max_reg_num ());
13824 
13825   /* Combining insns may have turned an indirect jump into a
13826      direct jump.  Rebuild the JUMP_LABEL fields of jumping
13827      instructions.  */
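  /* For instance (hypothetical RTL), an indirect jump
       (set (pc) (reg 100))
     may have become (set (pc) (label_ref ...)) once combine substituted
     a known label into it.  */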
13828   if (rebuild_jump_labels_after_combine)
13829     {
13830       timevar_push (TV_JUMP);
13831       rebuild_jump_labels (get_insns ());
13832       cleanup_cfg (0);
13833       timevar_pop (TV_JUMP);
13834     }
13835 
13836   regstat_free_n_sets_and_refs ();
13837   return 0;
13838 }
13839 
13840 struct rtl_opt_pass pass_combine =
13841 {
13842  {
13843   RTL_PASS,
13844   "combine",                            /* name */
13845   OPTGROUP_NONE,                        /* optinfo_flags */
13846   gate_handle_combine,                  /* gate */
13847   rest_of_handle_combine,               /* execute */
13848   NULL,                                 /* sub */
13849   NULL,                                 /* next */
13850   0,                                    /* static_pass_number */
13851   TV_COMBINE,                           /* tv_id */
13852   PROP_cfglayout,                       /* properties_required */
13853   0,                                    /* properties_provided */
13854   0,                                    /* properties_destroyed */
13855   0,                                    /* todo_flags_start */
13856   TODO_df_finish | TODO_verify_rtl_sharing |
13857   TODO_ggc_collect,                     /* todo_flags_finish */
13858  }
13859 };
13860