xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/optabs.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2    Copyright (C) 1987-2015 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "diagnostic-core.h"
26 
27 /* Include insn-config.h before expr.h so that HAVE_conditional_move
28    is properly defined.  */
29 #include "insn-config.h"
30 #include "rtl.h"
31 #include "hash-set.h"
32 #include "machmode.h"
33 #include "vec.h"
34 #include "double-int.h"
35 #include "input.h"
36 #include "alias.h"
37 #include "symtab.h"
38 #include "wide-int.h"
39 #include "inchash.h"
40 #include "tree.h"
41 #include "tree-hasher.h"
42 #include "stor-layout.h"
43 #include "stringpool.h"
44 #include "varasm.h"
45 #include "tm_p.h"
46 #include "flags.h"
47 #include "hard-reg-set.h"
48 #include "function.h"
49 #include "except.h"
50 #include "hashtab.h"
51 #include "statistics.h"
52 #include "real.h"
53 #include "fixed-value.h"
54 #include "expmed.h"
55 #include "dojump.h"
56 #include "explow.h"
57 #include "calls.h"
58 #include "emit-rtl.h"
59 #include "stmt.h"
60 #include "expr.h"
61 #include "insn-codes.h"
62 #include "optabs.h"
63 #include "libfuncs.h"
64 #include "recog.h"
65 #include "reload.h"
66 #include "ggc.h"
67 #include "predict.h"
68 #include "dominance.h"
69 #include "cfg.h"
70 #include "basic-block.h"
71 #include "target.h"
72 
73 struct target_optabs default_target_optabs;
74 struct target_libfuncs default_target_libfuncs;
75 struct target_optabs *this_fn_optabs = &default_target_optabs;
76 #if SWITCHABLE_TARGET
77 struct target_optabs *this_target_optabs = &default_target_optabs;
78 struct target_libfuncs *this_target_libfuncs = &default_target_libfuncs;
79 #endif
80 
81 #define libfunc_hash \
82   (this_target_libfuncs->x_libfunc_hash)
83 
84 static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *,
85 				   machine_mode *);
86 static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int);
87 static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool);
88 
89 /* Debug facility for use in GDB.  */
90 void debug_optab_libfuncs (void);
91 
92 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
93 #if ENABLE_DECIMAL_BID_FORMAT
94 #define DECIMAL_PREFIX "bid_"
95 #else
96 #define DECIMAL_PREFIX "dpd_"
97 #endif
98 
99 /* Used for libfunc_hash.  */
100 
101 hashval_t
102 libfunc_hasher::hash (libfunc_entry *e)
103 {
104   return ((e->mode1 + e->mode2 * NUM_MACHINE_MODES) ^ e->op);
105 }
106 
107 /* Used for libfunc_hash.  */
108 
109 bool
110 libfunc_hasher::equal (libfunc_entry *e1, libfunc_entry *e2)
111 {
112   return e1->op == e2->op && e1->mode1 == e2->mode1 && e1->mode2 == e2->mode2;
113 }
114 
115 /* Return libfunc corresponding operation defined by OPTAB converting
116    from MODE2 to MODE1.  Trigger lazy initialization if needed, return NULL
117    if no libfunc is available.  */
118 rtx
119 convert_optab_libfunc (convert_optab optab, machine_mode mode1,
120 		       machine_mode mode2)
121 {
122   struct libfunc_entry e;
123   struct libfunc_entry **slot;
124 
125   /* ??? This ought to be an assert, but not all of the places
126      that we expand optabs know about the optabs that got moved
127      to being direct.  */
128   if (!(optab >= FIRST_CONV_OPTAB && optab <= LAST_CONVLIB_OPTAB))
129     return NULL_RTX;
130 
131   e.op = optab;
132   e.mode1 = mode1;
133   e.mode2 = mode2;
134   slot = libfunc_hash->find_slot (&e, NO_INSERT);
135   if (!slot)
136     {
137       const struct convert_optab_libcall_d *d
138 	= &convlib_def[optab - FIRST_CONV_OPTAB];
139 
140       if (d->libcall_gen == NULL)
141 	return NULL;
142 
143       d->libcall_gen (optab, d->libcall_basename, mode1, mode2);
144       slot = libfunc_hash->find_slot (&e, NO_INSERT);
145       if (!slot)
146 	return NULL;
147     }
148   return (*slot)->libfunc;
149 }
150 
151 /* Return libfunc corresponding operation defined by OPTAB in MODE.
152    Trigger lazy initialization if needed, return NULL if no libfunc is
153    available.  */
154 rtx
155 optab_libfunc (optab optab, machine_mode mode)
156 {
157   struct libfunc_entry e;
158   struct libfunc_entry **slot;
159 
160   /* ??? This ought to be an assert, but not all of the places
161      that we expand optabs know about the optabs that got moved
162      to being direct.  */
163   if (!(optab >= FIRST_NORM_OPTAB && optab <= LAST_NORMLIB_OPTAB))
164     return NULL_RTX;
165 
166   e.op = optab;
167   e.mode1 = mode;
168   e.mode2 = VOIDmode;
169   slot = libfunc_hash->find_slot (&e, NO_INSERT);
170   if (!slot)
171     {
172       const struct optab_libcall_d *d
173 	= &normlib_def[optab - FIRST_NORM_OPTAB];
174 
175       if (d->libcall_gen == NULL)
176 	return NULL;
177 
178       d->libcall_gen (optab, d->libcall_basename, d->libcall_suffix, mode);
179       slot = libfunc_hash->find_slot (&e, NO_INSERT);
180       if (!slot)
181 	return NULL;
182     }
183   return (*slot)->libfunc;
184 }
185 
186 
187 /* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
188    the result of operation CODE applied to OP0 (and OP1 if it is a binary
189    operation).
190 
191    If the last insn does not set TARGET, don't do anything, but return 1.
192 
193    If the last insn or a previous insn sets TARGET and TARGET is one of OP0
194    or OP1, don't add the REG_EQUAL note but return 0.  Our caller can then
195    try again, ensuring that TARGET is not one of the operands.  */
196 
static int
add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx_insn *last_insn;
  rtx set;
  rtx note;

  /* Callers must hand us a non-trivial sequence of at least two insns.  */
  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  /* Only codes whose RTL form is CODE applied to one or two operands can
     be expressed as a REG_EQUAL note here; punt on everything else.  */
  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  /* A ZERO_EXTRACT destination sets only part of TARGET, so a note
     describing the full value would be wrong.  */
  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  /* Walk forward to the last insn of the sequence; that is where the
     note belongs.  */
  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  /* If TARGET is in OP0 or OP1, punt.  We'd end up with a note referencing
     a value changing in the insn, so the note would be invalid for CSE.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      if (MEM_P (target)
	  && (rtx_equal_p (target, op0)
	      || (op1 && rtx_equal_p (target, op1))))
	{
	  /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
	     over expanding it as temp = MEM op X, MEM = temp.  If the target
	     supports MEM = MEM op X instructions, it is sometimes too hard
	     to reconstruct that form later, especially if X is also a memory,
	     and due to multiple occurrences of addresses the address might
	     be forced into register unnecessarily.
	     Note that not emitting the REG_EQUIV note might inhibit
	     CSE in some cases.  */
	  set = single_set (last_insn);
	  if (set
	      && GET_CODE (SET_SRC (set)) == code
	      && MEM_P (SET_DEST (set))
	      && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0))
		  || (op1 && rtx_equal_p (SET_DEST (set),
					  XEXP (SET_SRC (set), 1)))))
	    return 1;
	}
      /* Tell the caller to retry with a TARGET distinct from the
	 operands.  */
      return 0;
    }

  set = set_for_reg_notes (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    switch (code)
      {
      /* These bit-query/byte-swap codes may take an operand whose mode
	 differs from the result mode; express the note in the operand's
	 mode and then truncate or zero-extend it to the target's mode.  */
      case FFS:
      case CLZ:
      case CTZ:
      case CLRSB:
      case POPCOUNT:
      case PARITY:
      case BSWAP:
	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
	  {
	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
	    if (GET_MODE_SIZE (GET_MODE (op0))
		> GET_MODE_SIZE (GET_MODE (target)))
	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
					 note, GET_MODE (op0));
	    else
	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
					 note, GET_MODE (op0));
	    break;
	  }
	/* FALLTHRU */
      default:
	note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
	break;
      }
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  /* Attach the note to the insn that actually sets TARGET.  */
  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}
294 
295 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
296    for a widening operation would be.  In most cases this would be OP0, but if
297    that's a constant it'll be VOIDmode, which isn't useful.  */
298 
299 static machine_mode
300 widened_mode (machine_mode to_mode, rtx op0, rtx op1)
301 {
302   machine_mode m0 = GET_MODE (op0);
303   machine_mode m1 = GET_MODE (op1);
304   machine_mode result;
305 
306   if (m0 == VOIDmode && m1 == VOIDmode)
307     return to_mode;
308   else if (m0 == VOIDmode || GET_MODE_SIZE (m0) < GET_MODE_SIZE (m1))
309     result = m1;
310   else
311     result = m0;
312 
313   if (GET_MODE_SIZE (result) > GET_MODE_SIZE (to_mode))
314     return to_mode;
315 
316   return result;
317 }
318 
319 /* Like optab_handler, but for widening_operations that have a
320    TO_MODE and a FROM_MODE.  */
321 
322 enum insn_code
323 widening_optab_handler (optab op, machine_mode to_mode,
324 			machine_mode from_mode)
325 {
326   unsigned scode = (op << 16) | to_mode;
327   if (to_mode != from_mode && from_mode != VOIDmode)
328     {
329       /* ??? Why does find_widening_optab_handler_and_mode attempt to
330 	 widen things that can't be widened?  E.g. add_optab... */
331       if (op > LAST_CONV_OPTAB)
332 	return CODE_FOR_nothing;
333       scode |= from_mode << 8;
334     }
335   return raw_optab_handler (scode);
336 }
337 
338 /* Find a widening optab even if it doesn't widen as much as we want.
339    E.g. if from_mode is HImode, and to_mode is DImode, and there is no
340    direct HI->SI insn, then return SI->DI, if that exists.
341    If PERMIT_NON_WIDENING is non-zero then this can be used with
342    non-widening optabs also.  */
343 
344 enum insn_code
345 find_widening_optab_handler_and_mode (optab op, machine_mode to_mode,
346 				      machine_mode from_mode,
347 				      int permit_non_widening,
348 				      machine_mode *found_mode)
349 {
350   for (; (permit_non_widening || from_mode != to_mode)
351 	 && GET_MODE_SIZE (from_mode) <= GET_MODE_SIZE (to_mode)
352 	 && from_mode != VOIDmode;
353        from_mode = GET_MODE_WIDER_MODE (from_mode))
354     {
355       enum insn_code handler = widening_optab_handler (op, to_mode,
356 						       from_mode);
357 
358       if (handler != CODE_FOR_nothing)
359 	{
360 	  if (found_mode)
361 	    *found_mode = from_mode;
362 	  return handler;
363 	}
364     }
365 
366   return CODE_FOR_nothing;
367 }
368 
369 /* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
370    says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
371    not actually do a sign-extend or zero-extend, but can leave the
372    higher-order bits of the result rtx undefined, for example, in the case
373    of logical operations, but not right shifts.  */
374 
static rtx
widen_operand (rtx op, machine_mode mode, machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a lowpart or paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_lowpart (mode, force_reg (GET_MODE (op), op));

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  The clobber tells the register allocator the high part
     is dead, so only the low part needs to hold a defined value.  */

  result = gen_reg_rtx (mode);
  emit_clobber (result);
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}
406 
407 /* Return the optab used for computing the operation given by the tree code,
408    CODE and the tree EXP.  This function is not always usable (for example, it
409    cannot give complete results for multiplication or division) but probably
410    ought to be relied on more widely throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, const_tree type,
		     enum optab_subtype subtype)
{
  bool trapv;

  /* First handle codes whose optab does not depend on -ftrapv.  */
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case MULT_HIGHPART_EXPR:
      return TYPE_UNSIGNED (type) ? umul_highpart_optab : smul_highpart_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    /* For vector shifts, SUBTYPE distinguishes a vector shift count
       (optab_vector) from a single scalar count (optab_scalar).  */
    case LSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (subtype == optab_vector)
	    return TYPE_SATURATING (type) ? unknown_optab : vashl_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (subtype == optab_vector)
	    return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (subtype == optab_vector)
	    return vrotl_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return rotl_optab;

    case RROTATE_EXPR:
      if (TREE_CODE (type) == VECTOR_TYPE)
	{
	  if (subtype == optab_vector)
	    return vrotr_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case SAD_EXPR:
      return TYPE_UNSIGNED (type) ? usad_optab : ssad_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? (TYPE_SATURATING (type)
		 ? usmadd_widen_optab : umadd_widen_optab)
	      : (TYPE_SATURATING (type)
		 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? (TYPE_SATURATING (type)
		 ? usmsub_widen_optab : umsub_widen_optab)
	      : (TYPE_SATURATING (type)
		 ? ssmsub_widen_optab : smsub_widen_optab));

    case FMA_EXPR:
      return fma_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type)
	     ? reduc_umax_scal_optab : reduc_smax_scal_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type)
	     ? reduc_umin_scal_optab : reduc_smin_scal_optab;

    case REDUC_PLUS_EXPR:
      return reduc_plus_scal_optab;

    case VEC_WIDEN_MULT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;

    case VEC_WIDEN_MULT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_even_optab : vec_widen_smult_even_optab;

    case VEC_WIDEN_MULT_ODD_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab;

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
        vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab;

    case VEC_UNPACK_HI_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_hi_optab : vec_unpacks_hi_optab;

    case VEC_UNPACK_LO_EXPR:
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_lo_optab : vec_unpacks_lo_optab;

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from input operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from output operand.  */
      return TYPE_UNSIGNED (type) ?
	vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;

    default:
      break;
    }

  /* The remaining codes pick a trapping variant when signed overflow
     traps are requested for an integral TYPE.  */
  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    default:
      return unknown_optab;
    }
}
625 
626 /* Given optab UNOPTAB that reduces a vector to a scalar, find instead the old
627    optab that produces a vector with the reduction result in one element,
628    for a tree with type TYPE.  */
629 
630 optab
631 scalar_reduc_to_vector (optab unoptab, const_tree type)
632 {
633   switch (unoptab)
634     {
635     case reduc_plus_scal_optab:
636       return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
637 
638     case reduc_smin_scal_optab: return reduc_smin_optab;
639     case reduc_umin_scal_optab: return reduc_umin_optab;
640     case reduc_smax_scal_optab: return reduc_smax_optab;
641     case reduc_umax_scal_optab: return reduc_umax_optab;
642     default: return unknown_optab;
643     }
644 }
645 
646 /* Expand vector widening operations.
647 
648    There are two different classes of operations handled here:
649    1) Operations whose result is wider than all the arguments to the operation.
650       Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
651       In this case OP0 and optionally OP1 would be initialized,
652       but WIDE_OP wouldn't (not relevant for this case).
653    2) Operations whose result is of the same size as the last argument to the
654       operation, but wider than all the other arguments to the operation.
655       Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
656       In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
657 
658    E.g, when called to expand the following operations, this is how
659    the arguments will be initialized:
660                                 nops    OP0     OP1     WIDE_OP
661    widening-sum                 2       oprnd0  -       oprnd1
662    widening-dot-product         3       oprnd0  oprnd1  oprnd2
663    widening-mult                2       oprnd0  oprnd1  -
664    type-promotion (vec-unpack)  1       oprnd0  -       -  */
665 
rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
			   rtx target, int unsignedp)
{
  struct expand_operand eops[4];
  tree oprnd0, oprnd1, oprnd2;
  machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
  optab widen_pattern_optab;
  enum insn_code icode;
  /* Number of tree operands CODE takes (1-3 for the codes handled here).  */
  int nops = TREE_CODE_LENGTH (ops->code);
  int op;

  oprnd0 = ops->op0;
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default);
  /* The multiply-accumulate forms are conversion optabs keyed on both the
     wide (accumulator) mode and the narrow input mode; everything else is
     keyed on the input mode alone.  */
  if (ops->code == WIDEN_MULT_PLUS_EXPR
      || ops->code == WIDEN_MULT_MINUS_EXPR)
    icode = find_widening_optab_handler (widen_pattern_optab,
					 TYPE_MODE (TREE_TYPE (ops->op2)),
					 tmode0, 0);
  else
    icode = optab_handler (widen_pattern_optab, tmode0);
  gcc_assert (icode != CODE_FOR_nothing);

  if (nops >= 2)
    {
      oprnd1 = ops->op1;
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    wmode = tmode1;
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = ops->op2;
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
    }

  /* Build the operand list: output, then the narrow input(s), then the
     optional wide input, converting each to its expected mode.  */
  op = 0;
  create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
  create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
  if (op1)
    create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
  if (wide_op)
    create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
  expand_insn (icode, op, eops);
  return eops[0].value;
}
718 
719 /* Generate code to perform an operation specified by TERNARY_OPTAB
720    on operands OP0, OP1 and OP2, with result having machine-mode MODE.
721 
722    UNSIGNEDP is for the case where we have to widen the operands
723    to perform the operation.  It says to use zero-extension.
724 
725    If TARGET is nonzero, the value
726    is generated there, if it is convenient to do so.
727    In all cases an rtx is returned for the locus of the value;
728    this may or may not be TARGET.  */
729 
730 rtx
731 expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
732 		   rtx op1, rtx op2, rtx target, int unsignedp)
733 {
734   struct expand_operand ops[4];
735   enum insn_code icode = optab_handler (ternary_optab, mode);
736 
737   gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
738 
739   create_output_operand (&ops[0], target, mode);
740   create_convert_operand_from (&ops[1], op0, mode, unsignedp);
741   create_convert_operand_from (&ops[2], op1, mode, unsignedp);
742   create_convert_operand_from (&ops[3], op2, mode, unsignedp);
743   expand_insn (icode, 4, ops);
744   return ops[0].value;
745 }
746 
747 
748 /* Like expand_binop, but return a constant rtx if the result can be
749    calculated at compile time.  The arguments and return value are
750    otherwise the same as for expand_binop.  */
751 
752 rtx
753 simplify_expand_binop (machine_mode mode, optab binoptab,
754 		       rtx op0, rtx op1, rtx target, int unsignedp,
755 		       enum optab_methods methods)
756 {
757   if (CONSTANT_P (op0) && CONSTANT_P (op1))
758     {
759       rtx x = simplify_binary_operation (optab_to_code (binoptab),
760 					 mode, op0, op1);
761       if (x)
762 	return x;
763     }
764 
765   return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
766 }
767 
768 /* Like simplify_expand_binop, but always put the result in TARGET.
769    Return true if the expansion succeeded.  */
770 
771 bool
772 force_expand_binop (machine_mode mode, optab binoptab,
773 		    rtx op0, rtx op1, rtx target, int unsignedp,
774 		    enum optab_methods methods)
775 {
776   rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
777 				 target, unsignedp, methods);
778   if (x == 0)
779     return false;
780   if (x != target)
781     emit_move_insn (target, x);
782   return true;
783 }
784 
785 /* Create a new vector value in VMODE with all elements set to OP.  The
786    mode of OP must be the element mode of VMODE.  If OP is a constant,
787    then the return value will be a constant.  */
788 
789 static rtx
790 expand_vector_broadcast (machine_mode vmode, rtx op)
791 {
792   enum insn_code icode;
793   rtvec vec;
794   rtx ret;
795   int i, n;
796 
797   gcc_checking_assert (VECTOR_MODE_P (vmode));
798 
799   n = GET_MODE_NUNITS (vmode);
800   vec = rtvec_alloc (n);
801   for (i = 0; i < n; ++i)
802     RTVEC_ELT (vec, i) = op;
803 
804   if (CONSTANT_P (op))
805     return gen_rtx_CONST_VECTOR (vmode, vec);
806 
807   /* ??? If the target doesn't have a vec_init, then we have no easy way
808      of performing this operation.  Most of this sort of generic support
809      is hidden away in the vector lowering support in gimple.  */
810   icode = optab_handler (vec_init_optab, vmode);
811   if (icode == CODE_FOR_nothing)
812     return NULL;
813 
814   ret = gen_reg_rtx (vmode);
815   emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec)));
816 
817   return ret;
818 }
819 
820 /* This subroutine of expand_doubleword_shift handles the cases in which
821    the effective shift value is >= BITS_PER_WORD.  The arguments and return
822    value are the same as for the parent routine, except that SUPERWORD_OP1
823    is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
824    INTO_TARGET may be null if the caller has decided to calculate it.  */
825 
826 static bool
827 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
828 			rtx outof_target, rtx into_target,
829 			int unsignedp, enum optab_methods methods)
830 {
831   if (into_target != 0)
832     if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
833 			     into_target, unsignedp, methods))
834       return false;
835 
836   if (outof_target != 0)
837     {
838       /* For a signed right shift, we must fill OUTOF_TARGET with copies
839 	 of the sign bit, otherwise we must fill it with zeros.  */
840       if (binoptab != ashr_optab)
841 	emit_move_insn (outof_target, CONST0_RTX (word_mode));
842       else
843 	if (!force_expand_binop (word_mode, binoptab,
844 				 outof_input, GEN_INT (BITS_PER_WORD - 1),
845 				 outof_target, unsignedp, methods))
846 	  return false;
847     }
848   return true;
849 }
850 
851 /* This subroutine of expand_doubleword_shift handles the cases in which
852    the effective shift value is < BITS_PER_WORD.  The arguments and return
853    value are the same as for the parent routine.  */
854 
static bool
expand_subword_shift (machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  /* REVERSE_UNSIGNED_SHIFT moves bits in the opposite direction to
     BINOPTAB (used to extract the carried-over bits); UNSIGNED_SHIFT is
     the logical shift in the same direction as BINOPTAB.  */
  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      /* Safe to compute BITS_PER_WORD - OP1 directly: either OP1 is a
	 compile-time constant or the shift count isn't truncated below
	 BITS_PER_WORD by the hardware.  */
      carries = outof_input;
      tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
					    op1_mode), op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  /* ~OP1 == (BITS_PER_WORD - 1) - OP1 modulo the truncation mask.  */
	  tmp = immed_wide_int_const
	    (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
						op1_mode), op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}
930 
931 
932 #ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.

   Return true if all required insns (shifts and conditional moves)
   could be emitted; false means the caller must delete whatever was
   emitted and fall back to another strategy.  */

static bool
expand_doubleword_shift_condmove (machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  /* emit_conditional_move can fail if the target has no suitable
     conditional-move pattern for this mode/comparison combination.  */
  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
992 #endif
993 
/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.  This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).   Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      /* The recursive call passes a null OUTOF_TARGET so that it only
	 computes the INTO half; the OUTOF half is a plain word shift.  */
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && CONST_INT_P (tmp))
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx_insn *start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					  cmp_code, cmp1, cmp2,
					  outof_input, into_input,
					  op1, superword_op1,
					  outof_target, into_target,
					  unsignedp, methods, shift_mask))
      return true;
    /* Undo any partially-emitted condmove sequence before trying
       the branching fallback below.  */
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  rtx_code_label *subword_label = gen_label_rtx ();
  rtx_code_label *done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label, -1);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}
1141 
/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function returns NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.

   The multiplication proceeds as follows:
			         _______________________
			        [__op0_high_|__op0_low__]
			         _______________________
        *			[__op1_high_|__op1_low__]
        _______________________________________________
			         _______________________
    (1)				[__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
         _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.  Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
		       bool umulp, enum optab_methods methods)
{
  /* Word ordering of the two halves depends on target endianness.  */
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  /* WORDM1 is the shift count used for the sign-adjustment trick; it is
     only needed when falling back to a signed widening multiply.  */
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  /* No logical shift: use an arithmetic shift (yielding 0 or -1)
	     and subtract instead of add.  */
	  temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op0_high)
	return NULL_RTX;
    }

  /* Partial product (2b): op0_high * op1_low, non-widening.  */
  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
			 NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
			   NULL_RTX, 1, methods);
      if (temp)
	op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
				 NULL_RTX, 0, OPTAB_DIRECT);
      else
	{
	  temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
			       NULL_RTX, 0, methods);
	  if (!temp)
	    return NULL_RTX;
	  op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
				   NULL_RTX, 0, OPTAB_DIRECT);
	}

      if (!op1_high)
	return NULL_RTX;
    }

  /* Partial product (2a): op1_high * op0_low, non-widening.  */
  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
		       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
			 NULL_RTX, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* Partial product (1): the widening multiply of the low words.  */
  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
			    target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  /* Fold the (2a)+(2b) adjustment into the high word of the product.  */
  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
			 NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}
1301 
1302 /* Wrapper around expand_binop which takes an rtx code to specify
1303    the operation to perform, not an optab pointer.  All other
1304    arguments are the same.  */
1305 rtx
1306 expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0,
1307 		     rtx op1, rtx target, int unsignedp,
1308 		     enum optab_methods methods)
1309 {
1310   optab binop = code_to_optab (code);
1311   gcc_assert (binop);
1312 
1313   return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1314 }
1315 
1316 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1317    binop.  Order them according to commutative_operand_precedence and, if
1318    possible, try to put TARGET or a pseudo first.  */
1319 static bool
1320 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1321 {
1322   int op0_prec = commutative_operand_precedence (op0);
1323   int op1_prec = commutative_operand_precedence (op1);
1324 
1325   if (op0_prec < op1_prec)
1326     return true;
1327 
1328   if (op0_prec > op1_prec)
1329     return false;
1330 
1331   /* With equal precedence, both orders are ok, but it is better if the
1332      first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
1333   if (target == 0 || REG_P (target))
1334     return (REG_P (op1) && !REG_P (op0)) || target == op1;
1335   else
1336     return rtx_equal_p (op1, target);
1337 }
1338 
1339 /* Return true if BINOPTAB implements a shift operation.  */
1340 
1341 static bool
1342 shift_optab_p (optab binoptab)
1343 {
1344   switch (optab_to_code (binoptab))
1345     {
1346     case ASHIFT:
1347     case SS_ASHIFT:
1348     case US_ASHIFT:
1349     case ASHIFTRT:
1350     case LSHIFTRT:
1351     case ROTATE:
1352     case ROTATERT:
1353       return true;
1354 
1355     default:
1356       return false;
1357     }
1358 }
1359 
1360 /* Return true if BINOPTAB implements a commutative binary operation.  */
1361 
1362 static bool
1363 commutative_optab_p (optab binoptab)
1364 {
1365   return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH
1366 	  || binoptab == smul_widen_optab
1367 	  || binoptab == umul_widen_optab
1368 	  || binoptab == smul_highpart_optab
1369 	  || binoptab == umul_highpart_optab);
1370 }
1371 
1372 /* X is to be used in mode MODE as operand OPN to BINOPTAB.  If we're
1373    optimizing, and if the operand is a constant that costs more than
1374    1 instruction, force the constant into a register and return that
1375    register.  Return X otherwise.  UNSIGNEDP says whether X is unsigned.  */
1376 
1377 static rtx
1378 avoid_expensive_constant (machine_mode mode, optab binoptab,
1379 			  int opn, rtx x, bool unsignedp)
1380 {
1381   bool speed = optimize_insn_for_speed_p ();
1382 
1383   if (mode != VOIDmode
1384       && optimize
1385       && CONSTANT_P (x)
1386       && (rtx_cost (x, optab_to_code (binoptab), opn, speed)
1387 	  > set_src_cost (x, speed)))
1388     {
1389       if (CONST_INT_P (x))
1390 	{
1391 	  HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1392 	  if (intval != INTVAL (x))
1393 	    x = GEN_INT (intval);
1394 	}
1395       else
1396 	x = convert_modes (mode, VOIDmode, x, unsignedp);
1397       x = force_reg (mode, x);
1398     }
1399   return x;
1400 }
1401 
1402 /* Helper function for expand_binop: handle the case where there
1403    is an insn that directly implements the indicated operation.
1404    Returns null if this is not possible.  */
1405 static rtx
1406 expand_binop_directly (machine_mode mode, optab binoptab,
1407 		       rtx op0, rtx op1,
1408 		       rtx target, int unsignedp, enum optab_methods methods,
1409 		       rtx_insn *last)
1410 {
1411   machine_mode from_mode = widened_mode (mode, op0, op1);
1412   enum insn_code icode = find_widening_optab_handler (binoptab, mode,
1413 						      from_mode, 1);
1414   machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
1415   machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
1416   machine_mode mode0, mode1, tmp_mode;
1417   struct expand_operand ops[3];
1418   bool commutative_p;
1419   rtx pat;
1420   rtx xop0 = op0, xop1 = op1;
1421   rtx swap;
1422   bool canonicalize_op1 = false;
1423 
1424   /* If it is a commutative operator and the modes would match
1425      if we would swap the operands, we can save the conversions.  */
1426   commutative_p = commutative_optab_p (binoptab);
1427   if (commutative_p
1428       && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1
1429       && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode1)
1430     {
1431       swap = xop0;
1432       xop0 = xop1;
1433       xop1 = swap;
1434     }
1435 
1436   /* If we are optimizing, force expensive constants into a register.  */
1437   xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
1438   if (!shift_optab_p (binoptab))
1439     xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1440   else if (xmode1 != VOIDmode)
1441     /* Shifts and rotates often use a different mode for op1 from op0;
1442        for VOIDmode constants we don't know the mode, so force it
1443        to be canonicalized using convert_modes.  */
1444     canonicalize_op1 = true;
1445 
1446   /* In case the insn wants input operands in modes different from
1447      those of the actual operands, convert the operands.  It would
1448      seem that we don't need to convert CONST_INTs, but we do, so
1449      that they're properly zero-extended, sign-extended or truncated
1450      for their mode.  */
1451 
1452   mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode;
1453   if (xmode0 != VOIDmode && xmode0 != mode0)
1454     {
1455       xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1456       mode0 = xmode0;
1457     }
1458 
1459   mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1)
1460 	   ? GET_MODE (xop1) : mode);
1461   if (xmode1 != VOIDmode && xmode1 != mode1)
1462     {
1463       xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1464       mode1 = xmode1;
1465     }
1466 
1467   /* If operation is commutative,
1468      try to make the first operand a register.
1469      Even better, try to make it the same as the target.
1470      Also try to make the last operand a constant.  */
1471   if (commutative_p
1472       && swap_commutative_operands_with_target (target, xop0, xop1))
1473     {
1474       swap = xop1;
1475       xop1 = xop0;
1476       xop0 = swap;
1477     }
1478 
1479   /* Now, if insn's predicates don't allow our operands, put them into
1480      pseudo regs.  */
1481 
1482   if (binoptab == vec_pack_trunc_optab
1483       || binoptab == vec_pack_usat_optab
1484       || binoptab == vec_pack_ssat_optab
1485       || binoptab == vec_pack_ufix_trunc_optab
1486       || binoptab == vec_pack_sfix_trunc_optab)
1487     {
1488       /* The mode of the result is different then the mode of the
1489 	 arguments.  */
1490       tmp_mode = insn_data[(int) icode].operand[0].mode;
1491       if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1492 	{
1493 	  delete_insns_since (last);
1494 	  return NULL_RTX;
1495 	}
1496     }
1497   else
1498     tmp_mode = mode;
1499 
1500   create_output_operand (&ops[0], target, tmp_mode);
1501   create_input_operand (&ops[1], xop0, mode0);
1502   create_input_operand (&ops[2], xop1, mode1);
1503   pat = maybe_gen_insn (icode, 3, ops);
1504   if (pat)
1505     {
1506       /* If PAT is composed of more than one insn, try to add an appropriate
1507 	 REG_EQUAL note to it.  If we can't because TEMP conflicts with an
1508 	 operand, call expand_binop again, this time without a target.  */
1509       if (INSN_P (pat) && NEXT_INSN (as_a <rtx_insn *> (pat)) != NULL_RTX
1510 	  && ! add_equal_note (as_a <rtx_insn *> (pat), ops[0].value,
1511 			       optab_to_code (binoptab),
1512 			       ops[1].value, ops[2].value))
1513 	{
1514 	  delete_insns_since (last);
1515 	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1516 			       unsignedp, methods);
1517 	}
1518 
1519       emit_insn (pat);
1520       return ops[0].value;
1521     }
1522   delete_insns_since (last);
1523   return NULL_RTX;
1524 }
1525 
1526 /* Generate code to perform an operation specified by BINOPTAB
1527    on operands OP0 and OP1, with result having machine-mode MODE.
1528 
1529    UNSIGNEDP is for the case where we have to widen the operands
1530    to perform the operation.  It says to use zero-extension.
1531 
1532    If TARGET is nonzero, the value
1533    is generated there, if it is convenient to do so.
1534    In all cases an rtx is returned for the locus of the value;
1535    this may or may not be TARGET.  */
1536 
1537 rtx
1538 expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1,
1539 	      rtx target, int unsignedp, enum optab_methods methods)
1540 {
1541   enum optab_methods next_methods
1542     = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1543        ? OPTAB_WIDEN : methods);
1544   enum mode_class mclass;
1545   machine_mode wider_mode, inner_mode;
1546   rtx libfunc;
1547   rtx temp;
1548   rtx_insn *entry_last = get_last_insn ();
1549   rtx_insn *last;
1550 
1551   mclass = GET_MODE_CLASS (mode);
1552 
1553   /* If subtracting an integer constant, convert this into an addition of
1554      the negated constant.  */
1555 
1556   if (binoptab == sub_optab && CONST_INT_P (op1))
1557     {
1558       op1 = negate_rtx (mode, op1);
1559       binoptab = add_optab;
1560     }
1561   /* For shifts, constant invalid op1 might be expanded from different
1562      mode than MODE.  As those are invalid, force them to a register
1563      to avoid further problems during expansion.  */
1564   else if (CONST_INT_P (op1)
1565 	   && shift_optab_p (binoptab)
1566 	   && (inner_mode = (GET_MODE_INNER (mode) == VOIDmode
1567 			     ? mode : GET_MODE_INNER (mode))) != VOIDmode
1568 	   && UINTVAL (op1) >= GET_MODE_BITSIZE (inner_mode))
1569     {
1570       op1 = gen_int_mode (INTVAL (op1), inner_mode);
1571       op1 = force_reg (inner_mode, op1);
1572     }
1573 
1574   /* Record where to delete back to if we backtrack.  */
1575   last = get_last_insn ();
1576 
1577   /* If we can do it with a three-operand insn, do so.  */
1578 
1579   if (methods != OPTAB_MUST_WIDEN
1580       && find_widening_optab_handler (binoptab, mode,
1581 				      widened_mode (mode, op0, op1), 1)
1582 	    != CODE_FOR_nothing)
1583     {
1584       temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1585 				    unsignedp, methods, last);
1586       if (temp)
1587 	return temp;
1588     }
1589 
1590   /* If we were trying to rotate, and that didn't work, try rotating
1591      the other direction before falling back to shifts and bitwise-or.  */
1592   if (((binoptab == rotl_optab
1593 	&& optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
1594        || (binoptab == rotr_optab
1595 	   && optab_handler (rotl_optab, mode) != CODE_FOR_nothing))
1596       && mclass == MODE_INT)
1597     {
1598       optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1599       rtx newop1;
1600       unsigned int bits = GET_MODE_PRECISION (mode);
1601 
1602       if (CONST_INT_P (op1))
1603         newop1 = GEN_INT (bits - INTVAL (op1));
1604       else if (targetm.shift_truncation_mask (mode) == bits - 1)
1605         newop1 = negate_rtx (GET_MODE (op1), op1);
1606       else
1607         newop1 = expand_binop (GET_MODE (op1), sub_optab,
1608 			       gen_int_mode (bits, GET_MODE (op1)), op1,
1609 			       NULL_RTX, unsignedp, OPTAB_DIRECT);
1610 
1611       temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1612 				    target, unsignedp, methods, last);
1613       if (temp)
1614 	return temp;
1615     }
1616 
1617   /* If this is a multiply, see if we can do a widening operation that
1618      takes operands of this mode and makes a wider mode.  */
1619 
1620   if (binoptab == smul_optab
1621       && GET_MODE_2XWIDER_MODE (mode) != VOIDmode
1622       && (widening_optab_handler ((unsignedp ? umul_widen_optab
1623 					     : smul_widen_optab),
1624 				  GET_MODE_2XWIDER_MODE (mode), mode)
1625 	  != CODE_FOR_nothing))
1626     {
1627       temp = expand_binop (GET_MODE_2XWIDER_MODE (mode),
1628 			   unsignedp ? umul_widen_optab : smul_widen_optab,
1629 			   op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1630 
1631       if (temp != 0)
1632 	{
1633 	  if (GET_MODE_CLASS (mode) == MODE_INT
1634 	      && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp)))
1635 	    return gen_lowpart (mode, temp);
1636 	  else
1637 	    return convert_to_mode (mode, temp, unsignedp);
1638 	}
1639     }
1640 
1641   /* If this is a vector shift by a scalar, see if we can do a vector
1642      shift by a vector.  If so, broadcast the scalar into a vector.  */
1643   if (mclass == MODE_VECTOR_INT)
1644     {
1645       optab otheroptab = unknown_optab;
1646 
1647       if (binoptab == ashl_optab)
1648 	otheroptab = vashl_optab;
1649       else if (binoptab == ashr_optab)
1650 	otheroptab = vashr_optab;
1651       else if (binoptab == lshr_optab)
1652 	otheroptab = vlshr_optab;
1653       else if (binoptab == rotl_optab)
1654 	otheroptab = vrotl_optab;
1655       else if (binoptab == rotr_optab)
1656 	otheroptab = vrotr_optab;
1657 
1658       if (otheroptab && optab_handler (otheroptab, mode) != CODE_FOR_nothing)
1659 	{
1660 	  rtx vop1 = expand_vector_broadcast (mode, op1);
1661 	  if (vop1)
1662 	    {
1663 	      temp = expand_binop_directly (mode, otheroptab, op0, vop1,
1664 					    target, unsignedp, methods, last);
1665 	      if (temp)
1666 		return temp;
1667 	    }
1668 	}
1669     }
1670 
1671   /* Look for a wider mode of the same class for which we think we
1672      can open-code the operation.  Check for a widening multiply at the
1673      wider mode as well.  */
1674 
1675   if (CLASS_HAS_WIDER_MODES_P (mclass)
1676       && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1677     for (wider_mode = GET_MODE_WIDER_MODE (mode);
1678 	 wider_mode != VOIDmode;
1679 	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1680       {
1681 	if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing
1682 	    || (binoptab == smul_optab
1683 		&& GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1684 		&& (find_widening_optab_handler ((unsignedp
1685 						  ? umul_widen_optab
1686 						  : smul_widen_optab),
1687 						 GET_MODE_WIDER_MODE (wider_mode),
1688 						 mode, 0)
1689 		    != CODE_FOR_nothing)))
1690 	  {
1691 	    rtx xop0 = op0, xop1 = op1;
1692 	    int no_extend = 0;
1693 
1694 	    /* For certain integer operations, we need not actually extend
1695 	       the narrow operands, as long as we will truncate
1696 	       the results to the same narrowness.  */
1697 
1698 	    if ((binoptab == ior_optab || binoptab == and_optab
1699 		 || binoptab == xor_optab
1700 		 || binoptab == add_optab || binoptab == sub_optab
1701 		 || binoptab == smul_optab || binoptab == ashl_optab)
1702 		&& mclass == MODE_INT)
1703 	      {
1704 		no_extend = 1;
1705 		xop0 = avoid_expensive_constant (mode, binoptab, 0,
1706 						 xop0, unsignedp);
1707 		if (binoptab != ashl_optab)
1708 		  xop1 = avoid_expensive_constant (mode, binoptab, 1,
1709 						   xop1, unsignedp);
1710 	      }
1711 
1712 	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1713 
1714 	    /* The second operand of a shift must always be extended.  */
1715 	    xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1716 				  no_extend && binoptab != ashl_optab);
1717 
1718 	    temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1719 				 unsignedp, OPTAB_DIRECT);
1720 	    if (temp)
1721 	      {
1722 		if (mclass != MODE_INT
1723                     || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
1724 		  {
1725 		    if (target == 0)
1726 		      target = gen_reg_rtx (mode);
1727 		    convert_move (target, temp, 0);
1728 		    return target;
1729 		  }
1730 		else
1731 		  return gen_lowpart (mode, temp);
1732 	      }
1733 	    else
1734 	      delete_insns_since (last);
1735 	  }
1736       }
1737 
1738   /* If operation is commutative,
1739      try to make the first operand a register.
1740      Even better, try to make it the same as the target.
1741      Also try to make the last operand a constant.  */
1742   if (commutative_optab_p (binoptab)
1743       && swap_commutative_operands_with_target (target, op0, op1))
1744     {
1745       temp = op1;
1746       op1 = op0;
1747       op0 = temp;
1748     }
1749 
1750   /* These can be done a word at a time.  */
1751   if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1752       && mclass == MODE_INT
1753       && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1754       && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1755     {
1756       int i;
1757       rtx_insn *insns;
1758 
1759       /* If TARGET is the same as one of the operands, the REG_EQUAL note
1760 	 won't be accurate, so use a new target.  */
1761       if (target == 0
1762 	  || target == op0
1763 	  || target == op1
1764 	  || !valid_multiword_target_p (target))
1765 	target = gen_reg_rtx (mode);
1766 
1767       start_sequence ();
1768 
1769       /* Do the actual arithmetic.  */
1770       for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1771 	{
1772 	  rtx target_piece = operand_subword (target, i, 1, mode);
1773 	  rtx x = expand_binop (word_mode, binoptab,
1774 				operand_subword_force (op0, i, mode),
1775 				operand_subword_force (op1, i, mode),
1776 				target_piece, unsignedp, next_methods);
1777 
1778 	  if (x == 0)
1779 	    break;
1780 
1781 	  if (target_piece != x)
1782 	    emit_move_insn (target_piece, x);
1783 	}
1784 
1785       insns = get_insns ();
1786       end_sequence ();
1787 
1788       if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1789 	{
1790 	  emit_insn (insns);
1791 	  return target;
1792 	}
1793     }
1794 
1795   /* Synthesize double word shifts from single word shifts.  */
1796   if ((binoptab == lshr_optab || binoptab == ashl_optab
1797        || binoptab == ashr_optab)
1798       && mclass == MODE_INT
1799       && (CONST_INT_P (op1) || optimize_insn_for_speed_p ())
1800       && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1801       && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)
1802       && optab_handler (binoptab, word_mode) != CODE_FOR_nothing
1803       && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1804       && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1805     {
1806       unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1807       machine_mode op1_mode;
1808 
1809       double_shift_mask = targetm.shift_truncation_mask (mode);
1810       shift_mask = targetm.shift_truncation_mask (word_mode);
1811       op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1812 
1813       /* Apply the truncation to constant shifts.  */
1814       if (double_shift_mask > 0 && CONST_INT_P (op1))
1815 	op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1816 
1817       if (op1 == CONST0_RTX (op1_mode))
1818 	return op0;
1819 
1820       /* Make sure that this is a combination that expand_doubleword_shift
1821 	 can handle.  See the comments there for details.  */
1822       if (double_shift_mask == 0
1823 	  || (shift_mask == BITS_PER_WORD - 1
1824 	      && double_shift_mask == BITS_PER_WORD * 2 - 1))
1825 	{
1826 	  rtx_insn *insns;
1827 	  rtx into_target, outof_target;
1828 	  rtx into_input, outof_input;
1829 	  int left_shift, outof_word;
1830 
1831 	  /* If TARGET is the same as one of the operands, the REG_EQUAL note
1832 	     won't be accurate, so use a new target.  */
1833 	  if (target == 0
1834 	      || target == op0
1835 	      || target == op1
1836 	      || !valid_multiword_target_p (target))
1837 	    target = gen_reg_rtx (mode);
1838 
1839 	  start_sequence ();
1840 
1841 	  /* OUTOF_* is the word we are shifting bits away from, and
1842 	     INTO_* is the word that we are shifting bits towards, thus
1843 	     they differ depending on the direction of the shift and
1844 	     WORDS_BIG_ENDIAN.  */
1845 
1846 	  left_shift = binoptab == ashl_optab;
1847 	  outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1848 
1849 	  outof_target = operand_subword (target, outof_word, 1, mode);
1850 	  into_target = operand_subword (target, 1 - outof_word, 1, mode);
1851 
1852 	  outof_input = operand_subword_force (op0, outof_word, mode);
1853 	  into_input = operand_subword_force (op0, 1 - outof_word, mode);
1854 
1855 	  if (expand_doubleword_shift (op1_mode, binoptab,
1856 				       outof_input, into_input, op1,
1857 				       outof_target, into_target,
1858 				       unsignedp, next_methods, shift_mask))
1859 	    {
1860 	      insns = get_insns ();
1861 	      end_sequence ();
1862 
1863 	      emit_insn (insns);
1864 	      return target;
1865 	    }
1866 	  end_sequence ();
1867 	}
1868     }
1869 
1870   /* Synthesize double word rotates from single word shifts.  */
1871   if ((binoptab == rotl_optab || binoptab == rotr_optab)
1872       && mclass == MODE_INT
1873       && CONST_INT_P (op1)
1874       && GET_MODE_PRECISION (mode) == 2 * BITS_PER_WORD
1875       && optab_handler (ashl_optab, word_mode) != CODE_FOR_nothing
1876       && optab_handler (lshr_optab, word_mode) != CODE_FOR_nothing)
1877     {
1878       rtx_insn *insns;
1879       rtx into_target, outof_target;
1880       rtx into_input, outof_input;
1881       rtx inter;
1882       int shift_count, left_shift, outof_word;
1883 
1884       /* If TARGET is the same as one of the operands, the REG_EQUAL note
1885 	 won't be accurate, so use a new target. Do this also if target is not
1886 	 a REG, first because having a register instead may open optimization
1887 	 opportunities, and second because if target and op0 happen to be MEMs
1888 	 designating the same location, we would risk clobbering it too early
1889 	 in the code sequence we generate below.  */
1890       if (target == 0
1891 	  || target == op0
1892 	  || target == op1
1893 	  || !REG_P (target)
1894 	  || !valid_multiword_target_p (target))
1895 	target = gen_reg_rtx (mode);
1896 
1897       start_sequence ();
1898 
1899       shift_count = INTVAL (op1);
1900 
1901       /* OUTOF_* is the word we are shifting bits away from, and
1902 	 INTO_* is the word that we are shifting bits towards, thus
1903 	 they differ depending on the direction of the shift and
1904 	 WORDS_BIG_ENDIAN.  */
1905 
1906       left_shift = (binoptab == rotl_optab);
1907       outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1908 
1909       outof_target = operand_subword (target, outof_word, 1, mode);
1910       into_target = operand_subword (target, 1 - outof_word, 1, mode);
1911 
1912       outof_input = operand_subword_force (op0, outof_word, mode);
1913       into_input = operand_subword_force (op0, 1 - outof_word, mode);
1914 
1915       if (shift_count == BITS_PER_WORD)
1916 	{
1917 	  /* This is just a word swap.  */
1918 	  emit_move_insn (outof_target, into_input);
1919 	  emit_move_insn (into_target, outof_input);
1920 	  inter = const0_rtx;
1921 	}
1922       else
1923 	{
1924 	  rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1925 	  rtx first_shift_count, second_shift_count;
1926 	  optab reverse_unsigned_shift, unsigned_shift;
1927 
1928 	  reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1929 				    ? lshr_optab : ashl_optab);
1930 
1931 	  unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1932 			    ? ashl_optab : lshr_optab);
1933 
1934 	  if (shift_count > BITS_PER_WORD)
1935 	    {
1936 	      first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1937 	      second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1938 	    }
1939 	  else
1940 	    {
1941 	      first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1942 	      second_shift_count = GEN_INT (shift_count);
1943 	    }
1944 
1945 	  into_temp1 = expand_binop (word_mode, unsigned_shift,
1946 				     outof_input, first_shift_count,
1947 				     NULL_RTX, unsignedp, next_methods);
1948 	  into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1949 				     into_input, second_shift_count,
1950 				     NULL_RTX, unsignedp, next_methods);
1951 
1952 	  if (into_temp1 != 0 && into_temp2 != 0)
1953 	    inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1954 				  into_target, unsignedp, next_methods);
1955 	  else
1956 	    inter = 0;
1957 
1958 	  if (inter != 0 && inter != into_target)
1959 	    emit_move_insn (into_target, inter);
1960 
1961 	  outof_temp1 = expand_binop (word_mode, unsigned_shift,
1962 				      into_input, first_shift_count,
1963 				      NULL_RTX, unsignedp, next_methods);
1964 	  outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1965 				      outof_input, second_shift_count,
1966 				      NULL_RTX, unsignedp, next_methods);
1967 
1968 	  if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1969 	    inter = expand_binop (word_mode, ior_optab,
1970 				  outof_temp1, outof_temp2,
1971 				  outof_target, unsignedp, next_methods);
1972 
1973 	  if (inter != 0 && inter != outof_target)
1974 	    emit_move_insn (outof_target, inter);
1975 	}
1976 
1977       insns = get_insns ();
1978       end_sequence ();
1979 
1980       if (inter != 0)
1981 	{
1982 	  emit_insn (insns);
1983 	  return target;
1984 	}
1985     }
1986 
1987   /* These can be done a word at a time by propagating carries.  */
1988   if ((binoptab == add_optab || binoptab == sub_optab)
1989       && mclass == MODE_INT
1990       && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1991       && optab_handler (binoptab, word_mode) != CODE_FOR_nothing)
1992     {
1993       unsigned int i;
1994       optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1995       const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1996       rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1997       rtx xop0, xop1, xtarget;
1998 
1999       /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
2000 	 value is one of those, use it.  Otherwise, use 1 since it is the
2001 	 one easiest to get.  */
2002 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
2003       int normalizep = STORE_FLAG_VALUE;
2004 #else
2005       int normalizep = 1;
2006 #endif
2007 
2008       /* Prepare the operands.  */
2009       xop0 = force_reg (mode, op0);
2010       xop1 = force_reg (mode, op1);
2011 
2012       xtarget = gen_reg_rtx (mode);
2013 
2014       if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
2015 	target = xtarget;
2016 
2017       /* Indicate for flow that the entire target reg is being set.  */
2018       if (REG_P (target))
2019 	emit_clobber (xtarget);
2020 
2021       /* Do the actual arithmetic.  */
2022       for (i = 0; i < nwords; i++)
2023 	{
2024 	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
2025 	  rtx target_piece = operand_subword (xtarget, index, 1, mode);
2026 	  rtx op0_piece = operand_subword_force (xop0, index, mode);
2027 	  rtx op1_piece = operand_subword_force (xop1, index, mode);
2028 	  rtx x;
2029 
2030 	  /* Main add/subtract of the input operands.  */
2031 	  x = expand_binop (word_mode, binoptab,
2032 			    op0_piece, op1_piece,
2033 			    target_piece, unsignedp, next_methods);
2034 	  if (x == 0)
2035 	    break;
2036 
2037 	  if (i + 1 < nwords)
2038 	    {
2039 	      /* Store carry from main add/subtract.  */
2040 	      carry_out = gen_reg_rtx (word_mode);
2041 	      carry_out = emit_store_flag_force (carry_out,
2042 						 (binoptab == add_optab
2043 						  ? LT : GT),
2044 						 x, op0_piece,
2045 						 word_mode, 1, normalizep);
2046 	    }
2047 
2048 	  if (i > 0)
2049 	    {
2050 	      rtx newx;
2051 
2052 	      /* Add/subtract previous carry to main result.  */
2053 	      newx = expand_binop (word_mode,
2054 				   normalizep == 1 ? binoptab : otheroptab,
2055 				   x, carry_in,
2056 				   NULL_RTX, 1, next_methods);
2057 
2058 	      if (i + 1 < nwords)
2059 		{
2060 		  /* Get out carry from adding/subtracting carry in.  */
2061 		  rtx carry_tmp = gen_reg_rtx (word_mode);
2062 		  carry_tmp = emit_store_flag_force (carry_tmp,
2063 						     (binoptab == add_optab
2064 						      ? LT : GT),
2065 						     newx, x,
2066 						     word_mode, 1, normalizep);
2067 
2068 		  /* Logical-ior the two poss. carry together.  */
2069 		  carry_out = expand_binop (word_mode, ior_optab,
2070 					    carry_out, carry_tmp,
2071 					    carry_out, 0, next_methods);
2072 		  if (carry_out == 0)
2073 		    break;
2074 		}
2075 	      emit_move_insn (target_piece, newx);
2076 	    }
2077 	  else
2078 	    {
2079 	      if (x != target_piece)
2080 		emit_move_insn (target_piece, x);
2081 	    }
2082 
2083 	  carry_in = carry_out;
2084 	}
2085 
2086       if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2087 	{
2088 	  if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
2089 	      || ! rtx_equal_p (target, xtarget))
2090 	    {
2091 	      rtx temp = emit_move_insn (target, xtarget);
2092 
2093 	      set_dst_reg_note (temp, REG_EQUAL,
2094 				gen_rtx_fmt_ee (optab_to_code (binoptab),
2095 						mode, copy_rtx (xop0),
2096 						copy_rtx (xop1)),
2097 				target);
2098 	    }
2099 	  else
2100 	    target = xtarget;
2101 
2102 	  return target;
2103 	}
2104 
2105       else
2106 	delete_insns_since (last);
2107     }
2108 
2109   /* Attempt to synthesize double word multiplies using a sequence of word
2110      mode multiplications.  We first attempt to generate a sequence using a
2111      more efficient unsigned widening multiply, and if that fails we then
2112      try using a signed widening multiply.  */
2113 
2114   if (binoptab == smul_optab
2115       && mclass == MODE_INT
2116       && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2117       && optab_handler (smul_optab, word_mode) != CODE_FOR_nothing
2118       && optab_handler (add_optab, word_mode) != CODE_FOR_nothing)
2119     {
2120       rtx product = NULL_RTX;
2121       if (widening_optab_handler (umul_widen_optab, mode, word_mode)
2122 	    != CODE_FOR_nothing)
2123 	{
2124 	  product = expand_doubleword_mult (mode, op0, op1, target,
2125 					    true, methods);
2126 	  if (!product)
2127 	    delete_insns_since (last);
2128 	}
2129 
2130       if (product == NULL_RTX
2131 	  && widening_optab_handler (smul_widen_optab, mode, word_mode)
2132 		!= CODE_FOR_nothing)
2133 	{
2134 	  product = expand_doubleword_mult (mode, op0, op1, target,
2135 					    false, methods);
2136 	  if (!product)
2137 	    delete_insns_since (last);
2138 	}
2139 
2140       if (product != NULL_RTX)
2141 	{
2142 	  if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
2143 	    {
2144 	      temp = emit_move_insn (target ? target : product, product);
2145 	      set_dst_reg_note (temp,
2146 				REG_EQUAL,
2147 				gen_rtx_fmt_ee (MULT, mode,
2148 						copy_rtx (op0),
2149 						copy_rtx (op1)),
2150 				target ? target : product);
2151 	    }
2152 	  return product;
2153 	}
2154     }
2155 
2156   /* It can't be open-coded in this mode.
2157      Use a library call if one is available and caller says that's ok.  */
2158 
2159   libfunc = optab_libfunc (binoptab, mode);
2160   if (libfunc
2161       && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2162     {
2163       rtx_insn *insns;
2164       rtx op1x = op1;
2165       machine_mode op1_mode = mode;
2166       rtx value;
2167 
2168       start_sequence ();
2169 
2170       if (shift_optab_p (binoptab))
2171 	{
2172 	  op1_mode = targetm.libgcc_shift_count_mode ();
2173 	  /* Specify unsigned here,
2174 	     since negative shift counts are meaningless.  */
2175 	  op1x = convert_to_mode (op1_mode, op1, 1);
2176 	}
2177 
2178       if (GET_MODE (op0) != VOIDmode
2179 	  && GET_MODE (op0) != mode)
2180 	op0 = convert_to_mode (mode, op0, unsignedp);
2181 
2182       /* Pass 1 for NO_QUEUE so we don't lose any increments
2183 	 if the libcall is cse'd or moved.  */
2184       value = emit_library_call_value (libfunc,
2185 				       NULL_RTX, LCT_CONST, mode, 2,
2186 				       op0, mode, op1x, op1_mode);
2187 
2188       insns = get_insns ();
2189       end_sequence ();
2190 
2191       target = gen_reg_rtx (mode);
2192       emit_libcall_block_1 (insns, target, value,
2193 			    gen_rtx_fmt_ee (optab_to_code (binoptab),
2194 					    mode, op0, op1),
2195 			    trapv_binoptab_p (binoptab));
2196 
2197       return target;
2198     }
2199 
2200   delete_insns_since (last);
2201 
2202   /* It can't be done in this mode.  Can we do it in a wider mode?  */
2203 
2204   if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2205 	 || methods == OPTAB_MUST_WIDEN))
2206     {
2207       /* Caller says, don't even try.  */
2208       delete_insns_since (entry_last);
2209       return 0;
2210     }
2211 
2212   /* Compute the value of METHODS to pass to recursive calls.
2213      Don't allow widening to be tried recursively.  */
2214 
2215   methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2216 
2217   /* Look for a wider mode of the same class for which it appears we can do
2218      the operation.  */
2219 
2220   if (CLASS_HAS_WIDER_MODES_P (mclass))
2221     {
2222       for (wider_mode = GET_MODE_WIDER_MODE (mode);
2223 	   wider_mode != VOIDmode;
2224 	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2225 	{
2226 	  if (find_widening_optab_handler (binoptab, wider_mode, mode, 1)
2227 		  != CODE_FOR_nothing
2228 	      || (methods == OPTAB_LIB
2229 		  && optab_libfunc (binoptab, wider_mode)))
2230 	    {
2231 	      rtx xop0 = op0, xop1 = op1;
2232 	      int no_extend = 0;
2233 
2234 	      /* For certain integer operations, we need not actually extend
2235 		 the narrow operands, as long as we will truncate
2236 		 the results to the same narrowness.  */
2237 
2238 	      if ((binoptab == ior_optab || binoptab == and_optab
2239 		   || binoptab == xor_optab
2240 		   || binoptab == add_optab || binoptab == sub_optab
2241 		   || binoptab == smul_optab || binoptab == ashl_optab)
2242 		  && mclass == MODE_INT)
2243 		no_extend = 1;
2244 
2245 	      xop0 = widen_operand (xop0, wider_mode, mode,
2246 				    unsignedp, no_extend);
2247 
2248 	      /* The second operand of a shift must always be extended.  */
2249 	      xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2250 				    no_extend && binoptab != ashl_optab);
2251 
2252 	      temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2253 				   unsignedp, methods);
2254 	      if (temp)
2255 		{
2256 		  if (mclass != MODE_INT
2257 		      || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
2258 		    {
2259 		      if (target == 0)
2260 			target = gen_reg_rtx (mode);
2261 		      convert_move (target, temp, 0);
2262 		      return target;
2263 		    }
2264 		  else
2265 		    return gen_lowpart (mode, temp);
2266 		}
2267 	      else
2268 		delete_insns_since (last);
2269 	    }
2270 	}
2271     }
2272 
2273   delete_insns_since (entry_last);
2274   return 0;
2275 }
2276 
2277 /* Expand a binary operator which has both signed and unsigned forms.
2278    UOPTAB is the optab for unsigned operations, and SOPTAB is for
2279    signed operations.
2280 
2281    If we widen unsigned operands, we may use a signed wider operation instead
2282    of an unsigned wider operation, since the result would be the same.  */
2283 
2284 rtx
2285 sign_expand_binop (machine_mode mode, optab uoptab, optab soptab,
2286 		   rtx op0, rtx op1, rtx target, int unsignedp,
2287 		   enum optab_methods methods)
2288 {
2289   rtx temp;
2290   optab direct_optab = unsignedp ? uoptab : soptab;
2291   bool save_enable;
2292 
2293   /* Do it without widening, if possible.  */
2294   temp = expand_binop (mode, direct_optab, op0, op1, target,
2295 		       unsignedp, OPTAB_DIRECT);
2296   if (temp || methods == OPTAB_DIRECT)
2297     return temp;
2298 
2299   /* Try widening to a signed int.  Disable any direct use of any
2300      signed insn in the current mode.  */
2301   save_enable = swap_optab_enable (soptab, mode, false);
2302 
2303   temp = expand_binop (mode, soptab, op0, op1, target,
2304 		       unsignedp, OPTAB_WIDEN);
2305 
2306   /* For unsigned operands, try widening to an unsigned int.  */
2307   if (!temp && unsignedp)
2308     temp = expand_binop (mode, uoptab, op0, op1, target,
2309 			 unsignedp, OPTAB_WIDEN);
2310   if (temp || methods == OPTAB_WIDEN)
2311     goto egress;
2312 
2313   /* Use the right width libcall if that exists.  */
2314   temp = expand_binop (mode, direct_optab, op0, op1, target,
2315 		       unsignedp, OPTAB_LIB);
2316   if (temp || methods == OPTAB_LIB)
2317     goto egress;
2318 
2319   /* Must widen and use a libcall, use either signed or unsigned.  */
2320   temp = expand_binop (mode, soptab, op0, op1, target,
2321 		       unsignedp, methods);
2322   if (!temp && unsignedp)
2323     temp = expand_binop (mode, uoptab, op0, op1, target,
2324 			 unsignedp, methods);
2325 
2326  egress:
2327   /* Undo the fiddling above.  */
2328   if (save_enable)
2329     swap_optab_enable (soptab, mode, true);
2330   return temp;
2331 }
2332 
2333 /* Generate code to perform an operation specified by UNOPPTAB
2334    on operand OP0, with two results to TARG0 and TARG1.
2335    We assume that the order of the operands for the instruction
2336    is TARG0, TARG1, OP0.
2337 
2338    Either TARG0 or TARG1 may be zero, but what that means is that
2339    the result is not actually wanted.  We will generate it into
2340    a dummy pseudo-reg and discard it.  They may not both be zero.
2341 
2342    Returns 1 if this operation can be performed; 0 if not.  */
2343 
2344 int
2345 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2346 		    int unsignedp)
2347 {
2348   machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2349   enum mode_class mclass;
2350   machine_mode wider_mode;
2351   rtx_insn *entry_last = get_last_insn ();
2352   rtx_insn *last;
2353 
2354   mclass = GET_MODE_CLASS (mode);
2355 
2356   if (!targ0)
2357     targ0 = gen_reg_rtx (mode);
2358   if (!targ1)
2359     targ1 = gen_reg_rtx (mode);
2360 
2361   /* Record where to go back to if we fail.  */
2362   last = get_last_insn ();
2363 
2364   if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
2365     {
2366       struct expand_operand ops[3];
2367       enum insn_code icode = optab_handler (unoptab, mode);
2368 
2369       create_fixed_operand (&ops[0], targ0);
2370       create_fixed_operand (&ops[1], targ1);
2371       create_convert_operand_from (&ops[2], op0, mode, unsignedp);
2372       if (maybe_expand_insn (icode, 3, ops))
2373 	return 1;
2374     }
2375 
2376   /* It can't be done in this mode.  Can we do it in a wider mode?  */
2377 
2378   if (CLASS_HAS_WIDER_MODES_P (mclass))
2379     {
2380       for (wider_mode = GET_MODE_WIDER_MODE (mode);
2381 	   wider_mode != VOIDmode;
2382 	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2383 	{
2384 	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
2385 	    {
2386 	      rtx t0 = gen_reg_rtx (wider_mode);
2387 	      rtx t1 = gen_reg_rtx (wider_mode);
2388 	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2389 
2390 	      if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2391 		{
2392 		  convert_move (targ0, t0, unsignedp);
2393 		  convert_move (targ1, t1, unsignedp);
2394 		  return 1;
2395 		}
2396 	      else
2397 		delete_insns_since (last);
2398 	    }
2399 	}
2400     }
2401 
2402   delete_insns_since (entry_last);
2403   return 0;
2404 }
2405 
2406 /* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
2408    We assume that the order of the operands for the instruction
2409    is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2410    [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2411 
2412    Either TARG0 or TARG1 may be zero, but what that means is that
2413    the result is not actually wanted.  We will generate it into
2414    a dummy pseudo-reg and discard it.  They may not both be zero.
2415 
2416    Returns 1 if this operation can be performed; 0 if not.  */
2417 
2418 int
2419 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2420 		     int unsignedp)
2421 {
2422   machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2423   enum mode_class mclass;
2424   machine_mode wider_mode;
2425   rtx_insn *entry_last = get_last_insn ();
2426   rtx_insn *last;
2427 
2428   mclass = GET_MODE_CLASS (mode);
2429 
2430   if (!targ0)
2431     targ0 = gen_reg_rtx (mode);
2432   if (!targ1)
2433     targ1 = gen_reg_rtx (mode);
2434 
2435   /* Record where to go back to if we fail.  */
2436   last = get_last_insn ();
2437 
2438   if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
2439     {
2440       struct expand_operand ops[4];
2441       enum insn_code icode = optab_handler (binoptab, mode);
2442       machine_mode mode0 = insn_data[icode].operand[1].mode;
2443       machine_mode mode1 = insn_data[icode].operand[2].mode;
2444       rtx xop0 = op0, xop1 = op1;
2445 
2446       /* If we are optimizing, force expensive constants into a register.  */
2447       xop0 = avoid_expensive_constant (mode0, binoptab, 0, xop0, unsignedp);
2448       xop1 = avoid_expensive_constant (mode1, binoptab, 1, xop1, unsignedp);
2449 
2450       create_fixed_operand (&ops[0], targ0);
2451       create_convert_operand_from (&ops[1], op0, mode, unsignedp);
2452       create_convert_operand_from (&ops[2], op1, mode, unsignedp);
2453       create_fixed_operand (&ops[3], targ1);
2454       if (maybe_expand_insn (icode, 4, ops))
2455 	return 1;
2456       delete_insns_since (last);
2457     }
2458 
2459   /* It can't be done in this mode.  Can we do it in a wider mode?  */
2460 
2461   if (CLASS_HAS_WIDER_MODES_P (mclass))
2462     {
2463       for (wider_mode = GET_MODE_WIDER_MODE (mode);
2464 	   wider_mode != VOIDmode;
2465 	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2466 	{
2467 	  if (optab_handler (binoptab, wider_mode) != CODE_FOR_nothing)
2468 	    {
2469 	      rtx t0 = gen_reg_rtx (wider_mode);
2470 	      rtx t1 = gen_reg_rtx (wider_mode);
2471 	      rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2472 	      rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2473 
2474 	      if (expand_twoval_binop (binoptab, cop0, cop1,
2475 				       t0, t1, unsignedp))
2476 		{
2477 		  convert_move (targ0, t0, unsignedp);
2478 		  convert_move (targ1, t1, unsignedp);
2479 		  return 1;
2480 		}
2481 	      else
2482 		delete_insns_since (last);
2483 	    }
2484 	}
2485     }
2486 
2487   delete_insns_since (entry_last);
2488   return 0;
2489 }
2490 
2491 /* Expand the two-valued library call indicated by BINOPTAB, but
2492    preserve only one of the values.  If TARG0 is non-NULL, the first
2493    value is placed into TARG0; otherwise the second value is placed
2494    into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
2495    value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2496    This routine assumes that the value returned by the library call is
2497    as if the return value was of an integral mode twice as wide as the
2498    mode of OP0.  Returns 1 if the call was successful.  */
2499 
bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
			     rtx targ0, rtx targ1, enum rtx_code code)
{
  machine_mode mode;
  machine_mode libval_mode;
  rtx libval;
  rtx_insn *insns;
  rtx libfunc;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  /* Without a registered libfunc for BINOPTAB in this mode there is
     nothing we can call.  */
  mode = GET_MODE (op0);
  libfunc = optab_libfunc (binoptab, mode);
  if (!libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
					MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				    libval_mode, 2,
				    op0, mode,
				    op1, mode);
  /* Get the part of VAL containing the value that we want: byte offset
     0 for TARG0, the other MODE-sized half for TARG1.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
				targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
		      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}
2538 
2539 
2540 /* Wrapper around expand_unop which takes an rtx code to specify
2541    the operation to perform, not an optab pointer.  All other
2542    arguments are the same.  */
2543 rtx
2544 expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0,
2545 		    rtx target, int unsignedp)
2546 {
2547   optab unop = code_to_optab (code);
2548   gcc_assert (unop);
2549 
2550   return expand_unop (mode, unop, op0, target, unsignedp);
2551 }
2552 
2553 /* Try calculating
2554 	(clz:narrow x)
2555    as
2556 	(clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2557 
2558    A similar operation can be used for clrsb.  UNOPTAB says which operation
2559    we are trying to expand.  */
static rtx
widen_leading (machine_mode mode, rtx op0, rtx target, optab unoptab)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	    {
	      rtx xop0, temp;
	      rtx_insn *last;

	      last = get_last_insn ();

	      if (target == 0)
		target = gen_reg_rtx (mode);
	      /* For clz, zero-extend the operand (and treat the wide
		 result as unsigned); clrsb instead sign-extends.  */
	      xop0 = widen_operand (op0, wider_mode, mode,
				    unoptab != clrsb_optab, false);
	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unoptab != clrsb_optab);
	      /* Subtract off the extra leading bits contributed by the
		 wider mode's greater precision.  */
	      if (temp != 0)
		temp = expand_binop
		  (wider_mode, sub_optab, temp,
		   gen_int_mode (GET_MODE_PRECISION (wider_mode)
				 - GET_MODE_PRECISION (mode),
				 wider_mode),
		   target, true, OPTAB_DIRECT);
	      if (temp == 0)
		delete_insns_since (last);

	      /* NOTE: we return from inside the loop, so only the
		 narrowest wider mode with a handler is ever tried;
		 failure there is failure overall.  */
	      return temp;
	    }
	}
    }
  return 0;
}
2600 
2601 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2602    quantities, choosing which based on whether the high word is nonzero.  */
static rtx
expand_doubleword_clz (machine_mode mode, rtx op0, rtx target)
{
  /* Split the doubleword input into its high and low word halves.  */
  rtx xop0 = force_reg (mode, op0);
  rtx subhi = gen_highpart (word_mode, xop0);
  rtx sublo = gen_lowpart (word_mode, xop0);
  rtx_code_label *hi0_label = gen_label_rtx ();
  rtx_code_label *after_label = gen_label_rtx ();
  rtx_insn *seq;
  rtx temp, result;

  /* If we were not given a target, use a word_mode register, not a
     'mode' register.  The result will fit, and nobody is expecting
     anything bigger (the return type of __builtin_clz* is int).  */
  if (!target)
    target = gen_reg_rtx (word_mode);

  /* In any case, write to a word_mode scratch in both branches of the
     conditional, so we can ensure there is a single move insn setting
     'target' to tag a REG_EQUAL note on.  */
  result = gen_reg_rtx (word_mode);

  start_sequence ();

  /* If the high word is not equal to zero,
     then clz of the full value is clz of the high word.  */
  emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
			   word_mode, true, hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
  if (!temp)
    goto fail;

  if (temp != result)
    convert_move (result, temp, true);

  /* Skip over the high-word-is-zero branch.  */
  emit_jump_insn (gen_jump (after_label));
  emit_barrier ();

  /* Else clz of the full value is clz of the low word plus the number
     of bits in the high word.  */
  emit_label (hi0_label);

  temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
  if (!temp)
    goto fail;
  temp = expand_binop (word_mode, add_optab, temp,
		       gen_int_mode (GET_MODE_BITSIZE (word_mode), word_mode),
		       result, true, OPTAB_DIRECT);
  if (!temp)
    goto fail;
  if (temp != result)
    convert_move (result, temp, true);

  /* Both branches meet here; copy the scratch into TARGET with the
     single move we promised above.  */
  emit_label (after_label);
  convert_move (target, result, true);

  seq = get_insns ();
  end_sequence ();

  /* Tag the sequence with a (clz ...) REG_EQUAL note and emit it.  */
  add_equal_note (seq, target, CLZ, xop0, 0);
  emit_insn (seq);
  return target;

 fail:
  /* Either word_mode clz expansion failed; discard the half-built
     sequence and let the caller fall back to another strategy.  */
  end_sequence ();
  return 0;
}
2671 
2672 /* Try calculating
2673 	(bswap:narrow x)
2674    as
2675 	(lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))).  */
2676 static rtx
2677 widen_bswap (machine_mode mode, rtx op0, rtx target)
2678 {
2679   enum mode_class mclass = GET_MODE_CLASS (mode);
2680   machine_mode wider_mode;
2681   rtx x;
2682   rtx_insn *last;
2683 
2684   if (!CLASS_HAS_WIDER_MODES_P (mclass))
2685     return NULL_RTX;
2686 
2687   for (wider_mode = GET_MODE_WIDER_MODE (mode);
2688        wider_mode != VOIDmode;
2689        wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2690     if (optab_handler (bswap_optab, wider_mode) != CODE_FOR_nothing)
2691       goto found;
2692   return NULL_RTX;
2693 
2694  found:
2695   last = get_last_insn ();
2696 
2697   x = widen_operand (op0, wider_mode, mode, true, true);
2698   x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2699 
2700   gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode)
2701 	      && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode));
2702   if (x != 0)
2703     x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2704 		      GET_MODE_BITSIZE (wider_mode)
2705 		      - GET_MODE_BITSIZE (mode),
2706 		      NULL_RTX, true);
2707 
2708   if (x != 0)
2709     {
2710       if (target == 0)
2711 	target = gen_reg_rtx (mode);
2712       emit_move_insn (target, gen_lowpart (mode, x));
2713     }
2714   else
2715     delete_insns_since (last);
2716 
2717   return target;
2718 }
2719 
2720 /* Try calculating bswap as two bswaps of two word-sized operands.  */
2721 
2722 static rtx
2723 expand_doubleword_bswap (machine_mode mode, rtx op, rtx target)
2724 {
2725   rtx t0, t1;
2726 
2727   t1 = expand_unop (word_mode, bswap_optab,
2728 		    operand_subword_force (op, 0, mode), NULL_RTX, true);
2729   t0 = expand_unop (word_mode, bswap_optab,
2730 		    operand_subword_force (op, 1, mode), NULL_RTX, true);
2731 
2732   if (target == 0 || !valid_multiword_target_p (target))
2733     target = gen_reg_rtx (mode);
2734   if (REG_P (target))
2735     emit_clobber (target);
2736   emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2737   emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2738 
2739   return target;
2740 }
2741 
2742 /* Try calculating (parity x) as (and (popcount x) 1), where
2743    popcount can also be done in a wider mode.  */
2744 static rtx
2745 expand_parity (machine_mode mode, rtx op0, rtx target)
2746 {
2747   enum mode_class mclass = GET_MODE_CLASS (mode);
2748   if (CLASS_HAS_WIDER_MODES_P (mclass))
2749     {
2750       machine_mode wider_mode;
2751       for (wider_mode = mode; wider_mode != VOIDmode;
2752 	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2753 	{
2754 	  if (optab_handler (popcount_optab, wider_mode) != CODE_FOR_nothing)
2755 	    {
2756 	      rtx xop0, temp;
2757 	      rtx_insn *last;
2758 
2759 	      last = get_last_insn ();
2760 
2761 	      if (target == 0)
2762 		target = gen_reg_rtx (mode);
2763 	      xop0 = widen_operand (op0, wider_mode, mode, true, false);
2764 	      temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2765 				  true);
2766 	      if (temp != 0)
2767 		temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2768 				     target, true, OPTAB_DIRECT);
2769 	      if (temp == 0)
2770 		delete_insns_since (last);
2771 
2772 	      return temp;
2773 	    }
2774 	}
2775     }
2776   return 0;
2777 }
2778 
2779 /* Try calculating ctz(x) as K - clz(x & -x) ,
2780    where K is GET_MODE_PRECISION(mode) - 1.
2781 
2782    Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2783    don't have to worry about what the hardware does in that case.  (If
2784    the clz instruction produces the usual value at 0, which is K, the
2785    result of this code sequence will be -1; expand_ffs, below, relies
2786    on this.  It might be nice to have it be K instead, for consistency
2787    with the (very few) processors that provide a ctz with a defined
2788    value, but that would take one more instruction, and it would be
2789    less convenient for expand_ffs anyway.  */
2790 
2791 static rtx
2792 expand_ctz (machine_mode mode, rtx op0, rtx target)
2793 {
2794   rtx_insn *seq;
2795   rtx temp;
2796 
2797   if (optab_handler (clz_optab, mode) == CODE_FOR_nothing)
2798     return 0;
2799 
2800   start_sequence ();
2801 
2802   temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2803   if (temp)
2804     temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2805 			 true, OPTAB_DIRECT);
2806   if (temp)
2807     temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2808   if (temp)
2809     temp = expand_binop (mode, sub_optab,
2810 			 gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode),
2811 			 temp, target,
2812 			 true, OPTAB_DIRECT);
2813   if (temp == 0)
2814     {
2815       end_sequence ();
2816       return 0;
2817     }
2818 
2819   seq = get_insns ();
2820   end_sequence ();
2821 
2822   add_equal_note (seq, temp, CTZ, op0, 0);
2823   emit_insn (seq);
2824   return temp;
2825 }
2826 
2827 
2828 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2829    else with the sequence used by expand_clz.
2830 
2831    The ffs builtin promises to return zero for a zero value and ctz/clz
2832    may have an undefined value in that case.  If they do not give us a
2833    convenient value, we have to generate a test and branch.  */
2834 static rtx
2835 expand_ffs (machine_mode mode, rtx op0, rtx target)
2836 {
2837   HOST_WIDE_INT val = 0;
2838   bool defined_at_zero = false;
2839   rtx temp;
2840   rtx_insn *seq;
2841 
2842   if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing)
2843     {
2844       start_sequence ();
2845 
2846       temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2847       if (!temp)
2848 	goto fail;
2849 
2850       defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2851     }
2852   else if (optab_handler (clz_optab, mode) != CODE_FOR_nothing)
2853     {
2854       start_sequence ();
2855       temp = expand_ctz (mode, op0, 0);
2856       if (!temp)
2857 	goto fail;
2858 
2859       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2860 	{
2861 	  defined_at_zero = true;
2862 	  val = (GET_MODE_PRECISION (mode) - 1) - val;
2863 	}
2864     }
2865   else
2866     return 0;
2867 
2868   if (defined_at_zero && val == -1)
2869     /* No correction needed at zero.  */;
2870   else
2871     {
2872       /* We don't try to do anything clever with the situation found
2873 	 on some processors (eg Alpha) where ctz(0:mode) ==
2874 	 bitsize(mode).  If someone can think of a way to send N to -1
2875 	 and leave alone all values in the range 0..N-1 (where N is a
2876 	 power of two), cheaper than this test-and-branch, please add it.
2877 
2878 	 The test-and-branch is done after the operation itself, in case
2879 	 the operation sets condition codes that can be recycled for this.
2880 	 (This is true on i386, for instance.)  */
2881 
2882       rtx_code_label *nonzero_label = gen_label_rtx ();
2883       emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2884 			       mode, true, nonzero_label);
2885 
2886       convert_move (temp, GEN_INT (-1), false);
2887       emit_label (nonzero_label);
2888     }
2889 
2890   /* temp now has a value in the range -1..bitsize-1.  ffs is supposed
2891      to produce a value in the range 0..bitsize.  */
2892   temp = expand_binop (mode, add_optab, temp, gen_int_mode (1, mode),
2893 		       target, false, OPTAB_DIRECT);
2894   if (!temp)
2895     goto fail;
2896 
2897   seq = get_insns ();
2898   end_sequence ();
2899 
2900   add_equal_note (seq, temp, FFS, op0, 0);
2901   emit_insn (seq);
2902   return temp;
2903 
2904  fail:
2905   end_sequence ();
2906   return 0;
2907 }
2908 
2909 /* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
2910    conditions, VAL may already be a SUBREG against which we cannot generate
2911    a further SUBREG.  In this case, we expect forcing the value into a
2912    register will work around the situation.  */
2913 
2914 static rtx
2915 lowpart_subreg_maybe_copy (machine_mode omode, rtx val,
2916 			   machine_mode imode)
2917 {
2918   rtx ret;
2919   ret = lowpart_subreg (omode, val, imode);
2920   if (ret == NULL)
2921     {
2922       val = force_reg (imode, val);
2923       ret = lowpart_subreg (omode, val, imode);
2924       gcc_assert (ret != NULL);
2925     }
2926   return ret;
2927 }
2928 
/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.  CODE is ABS or NEG, MODE a scalar
   float mode, OP0 the operand and TARGET an optional destination.
   Returns the result rtx, or NULL_RTX if the format does not permit
   this transformation.  */

static rtx
expand_absneg_bit (enum rtx_code code, machine_mode mode,
		   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  machine_mode imode;
  rtx temp;
  rtx_insn *insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      /* The value fits in one word: operate on it as a single integer.  */
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      /* Multi-word value: find which word holds the sign bit and the
	 bit position within that word.  */
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  /* For NEG we XOR with the sign bit; for ABS we AND with its
     complement (clearing the sign bit).  */
  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
  if (code == ABS)
    mask = ~mask;

  if (target == 0
      || target == op0
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      /* Only the word holding the sign bit needs the logical
		 operation; the rest are copied unchanged.  */
	      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
				   op0_piece,
				   immed_wide_int_const (mask, imode),
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
			   gen_lowpart (imode, op0),
			   immed_wide_int_const (mask, imode),
		           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      /* Record the floating point semantics of the integer operation.  */
      set_dst_reg_note (get_last_insn (), REG_EQUAL,
			gen_rtx_fmt_e (code, mode, copy_rtx (op0)),
			target);
    }

  return target;
}
3026 
/* As expand_unop, but will fail rather than attempt the operation in a
   different mode or with a libcall.  Returns the result rtx, or 0 if
   MODE has no direct insn for UNOPTAB or pattern generation fails.  */
static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
    {
      struct expand_operand ops[2];
      enum insn_code icode = optab_handler (unoptab, mode);
      rtx_insn *last = get_last_insn ();
      rtx pat;

      create_output_operand (&ops[0], target, mode);
      create_convert_operand_from (&ops[1], op0, mode, unsignedp);
      pat = maybe_gen_insn (icode, 2, ops);
      if (pat)
	{
	  /* A multi-insn expansion wants a REG_EQUAL note on its last
	     insn; if the note cannot be added (e.g. the destination is
	     unsuitable), throw the expansion away and retry through the
	     general expand_unop path.  */
	  if (INSN_P (pat) && NEXT_INSN (as_a <rtx_insn *> (pat)) != NULL_RTX
	      && ! add_equal_note (as_a <rtx_insn *> (pat), ops[0].value,
				   optab_to_code (unoptab),
				   ops[1].value, NULL_RTX))
	    {
	      delete_insns_since (last);
	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
	    }

	  emit_insn (pat);

	  return ops[0].value;
	}
    }
  return 0;
}
3061 
/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.  */

rtx
expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target,
	     int unsignedp)
{
  enum mode_class mclass = GET_MODE_CLASS (mode);
  machine_mode wider_mode;
  rtx temp;
  rtx libfunc;

  /* First choice: a direct insn in this mode.  */
  temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
  if (temp)
    return temp;

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening (or narrowing) clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
	return temp;

      /* For a double-word mode with a word_mode clz, split into a
	 compare-and-branch over the two halves.  */
      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
	  && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	{
	  temp = expand_doubleword_clz (mode, op0, target);
	  if (temp)
	    return temp;
	}

      goto try_libcall;
    }

  if (unoptab == clrsb_optab)
    {
      temp = widen_leading (mode, op0, target, unoptab);
      if (temp)
	return temp;
      goto try_libcall;
    }

  /* Widening (or narrowing) bswap needs special treatment.  */
  if (unoptab == bswap_optab)
    {
      /* HImode is special because in this mode BSWAP is equivalent to ROTATE
	 or ROTATERT.  First try these directly; if this fails, then try the
	 obvious pair of shifts with allowed widening, as this will probably
	 be always more efficient than the other fallback methods.  */
      if (mode == HImode)
	{
	  rtx_insn *last;
	  rtx temp1, temp2;

	  if (optab_handler (rotl_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotl_optab, op0, GEN_INT (8), target,
				   unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	     }

	  if (optab_handler (rotr_optab, mode) != CODE_FOR_nothing)
	    {
	      temp = expand_binop (mode, rotr_optab, op0, GEN_INT (8), target,
				   unsignedp, OPTAB_DIRECT);
	      if (temp)
		return temp;
	    }

	  last = get_last_insn ();

	  /* (x << 8) | (x >> 8), with widening allowed.  */
	  temp1 = expand_binop (mode, ashl_optab, op0, GEN_INT (8), NULL_RTX,
			        unsignedp, OPTAB_WIDEN);
	  temp2 = expand_binop (mode, lshr_optab, op0, GEN_INT (8), NULL_RTX,
			        unsignedp, OPTAB_WIDEN);
	  if (temp1 && temp2)
	    {
	      temp = expand_binop (mode, ior_optab, temp1, temp2, target,
				   unsignedp, OPTAB_WIDEN);
	      if (temp)
		return temp;
	    }

	  delete_insns_since (last);
	}

      temp = widen_bswap (mode, op0, target);
      if (temp)
	return temp;

      if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
	  && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
	{
	  temp = expand_doubleword_bswap (mode, op0, target);
	  if (temp)
	    return temp;
	}

      goto try_libcall;
    }

  /* Generic widening: try each wider mode that has a direct insn.  */
  if (CLASS_HAS_WIDER_MODES_P (mclass))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
	 wider_mode != VOIDmode;
	 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
	if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing)
	  {
	    rtx xop0 = op0;
	    rtx_insn *last = get_last_insn ();

	    /* For certain operations, we need not actually extend
	       the narrow operand, as long as we will truncate the
	       results to the same narrowness.  */

	    xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				  (unoptab == neg_optab
				   || unoptab == one_cmpl_optab)
				  && mclass == MODE_INT);

	    temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				unsignedp);

	    if (temp)
	      {
		if (mclass != MODE_INT
		    || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode))
		  {
		    /* Must emit an explicit conversion back to MODE.  */
		    if (target == 0)
		      target = gen_reg_rtx (mode);
		    convert_move (target, temp, 0);
		    return target;
		  }
		else
		  return gen_lowpart (mode, temp);
	      }
	    else
	      delete_insns_since (last);
	  }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && mclass == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && optab_handler (unoptab, word_mode) != CODE_FOR_nothing)
    {
      int i;
      rtx_insn *insns;

      if (target == 0 || target == op0 || !valid_multiword_target_p (target))
	target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
	{
	  rtx target_piece = operand_subword (target, i, 1, mode);
	  rtx x = expand_unop (word_mode, unoptab,
			       operand_subword_force (op0, i, mode),
			       target_piece, unsignedp);

	  if (target_piece != x)
	    emit_move_insn (target_piece, x);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return target;
    }

  if (optab_to_code (unoptab) == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  temp = expand_absneg_bit (NEG, mode, op0, target);
	  if (temp)
	    return temp;
	}

      /* If there is no negation pattern, and we have no negative zero,
	 try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
	{
	  temp = expand_binop (mode, (unoptab == negv_optab
				      ? subv_optab : sub_optab),
			       CONST0_RTX (mode), op0, target,
			       unsignedp, OPTAB_DIRECT);
	  if (temp)
	    return temp;
	}
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ffs (x) in terms of clz (x).  */
  if (unoptab == ffs_optab)
    {
      temp = expand_ffs (mode, op0, target);
      if (temp)
	return temp;
    }

  /* Try implementing ctz (x) in terms of clz (x).  */
  if (unoptab == ctz_optab)
    {
      temp = expand_ctz (mode, op0, target);
      if (temp)
	return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  libfunc = optab_libfunc (unoptab, mode);
  if (libfunc)
    {
      rtx_insn *insns;
      rtx value;
      rtx eq_value;
      machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
	 have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
	  || unoptab == clrsb_optab || unoptab == popcount_optab
	  || unoptab == parity_optab)
	outmode
	  = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node),
					  optab_libfunc (unoptab, mode)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
	 if the libcall is cse'd or moved.  */
      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
				       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      /* Build the (op ...) rtx describing the libcall's value, adjusted
	 for any mode difference, for the REG_EQUAL note.  */
      eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0);
      if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
	eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
      else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
	eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
      emit_libcall_block_1 (insns, target, value, eq_value,
			    trapv_unoptab_p (unoptab));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (mclass))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
	   wider_mode != VOIDmode;
	   wider_mode = GET_MODE_WIDER_MODE (wider_mode))
	{
	  /* Unlike the earlier loop, a library call in the wider mode
	     is acceptable here too.  */
	  if (optab_handler (unoptab, wider_mode) != CODE_FOR_nothing
	      || optab_libfunc (unoptab, wider_mode))
	    {
	      rtx xop0 = op0;
	      rtx_insn *last = get_last_insn ();

	      /* For certain operations, we need not actually extend
		 the narrow operand, as long as we will truncate the
		 results to the same narrowness.  */
	      xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
				    (unoptab == neg_optab
				     || unoptab == one_cmpl_optab
				     || unoptab == bswap_optab)
				    && mclass == MODE_INT);

	      temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
				  unsignedp);

	      /* If we are generating clz using wider mode, adjust the
		 result.  Similarly for clrsb.  */
	      if ((unoptab == clz_optab || unoptab == clrsb_optab)
		  && temp != 0)
		temp = expand_binop
		  (wider_mode, sub_optab, temp,
		   gen_int_mode (GET_MODE_PRECISION (wider_mode)
				 - GET_MODE_PRECISION (mode),
				 wider_mode),
		   target, true, OPTAB_DIRECT);

	      /* Likewise for bswap.  */
	      if (unoptab == bswap_optab && temp != 0)
		{
		  gcc_assert (GET_MODE_PRECISION (wider_mode)
			      == GET_MODE_BITSIZE (wider_mode)
			      && GET_MODE_PRECISION (mode)
				 == GET_MODE_BITSIZE (mode));

		  /* Shift the swapped bytes down into the low part.  */
		  temp = expand_shift (RSHIFT_EXPR, wider_mode, temp,
				       GET_MODE_BITSIZE (wider_mode)
				       - GET_MODE_BITSIZE (mode),
				       NULL_RTX, true);
		}

	      if (temp)
		{
		  if (mclass != MODE_INT)
		    {
		      if (target == 0)
			target = gen_reg_rtx (mode);
		      convert_move (target, temp, 0);
		      return target;
		    }
		  else
		    return gen_lowpart (mode, temp);
		}
	      else
		delete_insns_since (last);
	    }
	}
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (optab_to_code (unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}
3418 
/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.

   This variant never emits a conditional jump; it returns NULL_RTX if
   no branch-free expansion is available.  */

rtx
expand_abs_nojump (machine_mode mode, rtx op0, rtx target,
		   int result_unsignedp)
{
  rtx temp;

  /* Overflow-trapping abs only matters for trapping signed integer
     arithmetic; otherwise force the non-trapping variants.  */
  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
	return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (optab_handler (smax_optab, mode) != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx_insn *last = get_last_insn ();

      temp = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
			  op0, NULL_RTX, 0);
      if (temp != 0)
	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
			     OPTAB_WIDEN);

      if (temp != 0)
	return temp;

      /* Undo the partial expansion before trying something else.  */
      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && BRANCH_COST (optimize_insn_for_speed_p (),
	      	      false) >= 2)
    {
      /* EXTENDED is all-ones for negative x, zero otherwise.  */
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
				   GET_MODE_PRECISION (mode) - 1,
				   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
			   OPTAB_LIB_WIDEN);
      if (temp != 0)
	temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
	return temp;
    }

  return NULL_RTX;
}
3494 
/* Like expand_abs_nojump, but fall back to a compare-and-negate
   sequence when no branch-free expansion exists.  SAFE nonzero says
   TARGET may be modified before OP0 is fully consumed.  */
rtx
expand_abs (machine_mode mode, rtx op0, rtx target,
	    int result_unsignedp, int safe)
{
  rtx temp;
  rtx_code_label *op1;

  /* Trapping abs is only meaningful for signed integers with -ftrapv.  */
  if (GET_MODE_CLASS (mode) != MODE_INT
      || ! flag_trapv)
    result_unsignedp = 1;

  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  op1 = gen_label_rtx ();
  /* Use a fresh pseudo unless TARGET is known safe to clobber early
     and is an appropriate non-volatile, non-hard-register location.  */
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
	  && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  /* Skip the negation when the value is already non-negative.  */
  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
			   NULL_RTX, NULL_RTX, op1, -1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}
3540 
3541 /* Emit code to compute the one's complement absolute value of OP0
3542    (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3543    (TARGET may be NULL_RTX.)  The return value says where the result
3544    actually is to be found.
3545 
3546    MODE is the mode of the operand; the mode of the result is
3547    different but can be deduced from MODE.  */
3548 
3549 rtx
3550 expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target)
3551 {
3552   rtx temp;
3553 
3554   /* Not applicable for floating point modes.  */
3555   if (FLOAT_MODE_P (mode))
3556     return NULL_RTX;
3557 
3558   /* If we have a MAX insn, we can do this as MAX (x, ~x).  */
3559   if (optab_handler (smax_optab, mode) != CODE_FOR_nothing)
3560     {
3561       rtx_insn *last = get_last_insn ();
3562 
3563       temp = expand_unop (mode, one_cmpl_optab, op0, NULL_RTX, 0);
3564       if (temp != 0)
3565 	temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3566 			     OPTAB_WIDEN);
3567 
3568       if (temp != 0)
3569 	return temp;
3570 
3571       delete_insns_since (last);
3572     }
3573 
3574   /* If this machine has expensive jumps, we can do one's complement
3575      absolute value of X as (((signed) x >> (W-1)) ^ x).  */
3576 
3577   if (GET_MODE_CLASS (mode) == MODE_INT
3578       && BRANCH_COST (optimize_insn_for_speed_p (),
3579 	             false) >= 2)
3580     {
3581       rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3582 				   GET_MODE_PRECISION (mode) - 1,
3583 				   NULL_RTX, 0);
3584 
3585       temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3586 			   OPTAB_LIB_WIDEN);
3587 
3588       if (temp != 0)
3589 	return temp;
3590     }
3591 
3592   return NULL_RTX;
3593 }
3594 
/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.

   BITPOS is the bit position of the sign bit in OP1's format; OP0_IS_ABS
   says OP0 is already known non-negative.  Returns the result rtx or
   NULL_RTX on failure.  */

static rtx
expand_copysign_absneg (machine_mode mode, rtx op0, rtx op1, rtx target,
		        int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  enum insn_code icode;
  rtx sign;
  rtx_code_label *label;

  /* TARGET will be overwritten before OP1's sign is consumed, so it
     cannot double as OP1.  */
  if (target == op1)
    target = NULL_RTX;

  /* Check if the back end provides an insn that handles signbit for the
     argument's mode. */
  icode = optab_handler (signbit_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      imode = insn_data[(int) icode].operand[0].mode;
      sign = gen_reg_rtx (imode);
      emit_unop_insn (icode, sign, op1, UNKNOWN);
    }
  else
    {
      /* No signbit insn: extract the sign bit with an integer AND.  */
      if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
	{
	  imode = int_mode_for_mode (mode);
	  if (imode == BLKmode)
	    return NULL_RTX;
	  op1 = gen_lowpart (imode, op1);
	}
      else
	{
	  int word;

	  /* Multi-word float: locate the word containing the sign bit.  */
	  imode = word_mode;
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
	  else
	    word = bitpos / BITS_PER_WORD;
	  bitpos = bitpos % BITS_PER_WORD;
	  op1 = operand_subword_force (op1, word, mode);
	}

      wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
      sign = expand_binop (imode, and_optab, op1,
			   immed_wide_int_const (mask, imode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }

  if (!op0_is_abs)
    {
      /* Start from |op0|; the sign is applied below.  */
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
	return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
	emit_move_insn (target, op0);
    }

  /* Negate the magnitude only when OP1's sign bit is set.  */
  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  /* Fold the negation at compile time for float constants.  */
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}
3678 
3679 
/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  Returns the result
   rtx, or NULL_RTX when MODE has no integer mode to operate in.  */

static rtx
expand_copysign_bit (machine_mode mode, rtx op0, rtx op1, rtx target,
		     int bitpos, bool op0_is_abs)
{
  machine_mode imode;
  int word, nwords, i;
  rtx temp;
  rtx_insn *insns;

  /* Pick the integer mode to do the masking in: the whole value for
     single-word floats, otherwise word_mode applied word-by-word.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
	return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      /* WORD is the index of the word containing the sign bit; rebias
	 BITPOS to be relative to that word.  */
      if (FLOAT_WORDS_BIG_ENDIAN)
	word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
	word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  /* MASK has only the sign bit set.  */
  wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));

  /* We need a fresh register if TARGET would alias an input, or cannot
     be addressed word-by-word in the multi-word case.  */
  if (target == 0
      || target == op0
      || target == op1
      || (nwords > 1 && !valid_multiword_target_p (target)))
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      /* Multi-word case: copy each word of OP0; in the word holding the
	 sign bit, compute (OP0 & ~MASK) | (OP1 & MASK) instead.  */
      start_sequence ();

      for (i = 0; i < nwords; ++i)
	{
	  rtx targ_piece = operand_subword (target, i, 1, mode);
	  rtx op0_piece = operand_subword_force (op0, i, mode);

	  if (i == word)
	    {
	      if (!op0_is_abs)
		op0_piece
		  = expand_binop (imode, and_optab, op0_piece,
				  immed_wide_int_const (~mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      op1 = expand_binop (imode, and_optab,
				  operand_subword_force (op1, i, mode),
				  immed_wide_int_const (mask, imode),
				  NULL_RTX, 1, OPTAB_LIB_WIDEN);

	      temp = expand_binop (imode, ior_optab, op0_piece, op1,
				   targ_piece, 1, OPTAB_LIB_WIDEN);
	      if (temp != targ_piece)
		emit_move_insn (targ_piece, temp);
	    }
	  else
	    emit_move_insn (targ_piece, op0_piece);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
    }
  else
    {
      /* Single-word case: same (OP0 & ~MASK) | (OP1 & MASK) computation
	 on the integer view of the whole value.  */
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
		          immed_wide_int_const (mask, imode),
		          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
	op0 = expand_binop (imode, and_optab, op0,
			    immed_wide_int_const (~mask, imode),
			    NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
			   gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}
3775 
/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.  */

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  /* The fallbacks only make sense for formats that encode a sign bit,
     i.e. that have signed zeros.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  /* If OP0 is a constant, strip its sign now and remember that its
     absolute value is already known.  */
  op0_is_abs = false;
  if (CONST_DOUBLE_AS_FLOAT_P (op0))
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  /* Prefer the abs/neg based expansion when the sign bit position is
     readable and the target has both primitives (or OP0 is constant).  */
  if (fmt->signbit_ro >= 0
      && (CONST_DOUBLE_AS_FLOAT_P (op0)
	  || (optab_handler (neg_optab, mode) != CODE_FOR_nothing
	      && optab_handler (abs_optab, mode) != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  /* Fall back to integer bit-twiddling; this requires a writable sign
     bit position.  */
  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}
3825 
3826 /* Generate an instruction whose insn-code is INSN_CODE,
3827    with two operands: an output TARGET and an input OP0.
3828    TARGET *must* be nonzero, and the output is always stored there.
3829    CODE is an rtx code such that (CODE OP0) is an rtx that describes
3830    the value that is stored into TARGET.
3831 
3832    Return false if expansion failed.  */
3833 
3834 bool
3835 maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
3836 		      enum rtx_code code)
3837 {
3838   struct expand_operand ops[2];
3839   rtx pat;
3840 
3841   create_output_operand (&ops[0], target, GET_MODE (target));
3842   create_input_operand (&ops[1], op0, GET_MODE (op0));
3843   pat = maybe_gen_insn (icode, 2, ops);
3844   if (!pat)
3845     return false;
3846 
3847   if (INSN_P (pat) && NEXT_INSN (as_a <rtx_insn *> (pat)) != NULL_RTX
3848       && code != UNKNOWN)
3849     add_equal_note (as_a <rtx_insn *> (pat), ops[0].value, code, ops[1].value,
3850 		    NULL_RTX);
3851 
3852   emit_insn (pat);
3853 
3854   if (ops[0].value != target)
3855     emit_move_insn (target, ops[0].value);
3856   return true;
3857 }
3858 /* Generate an instruction whose insn-code is INSN_CODE,
3859    with two operands: an output TARGET and an input OP0.
3860    TARGET *must* be nonzero, and the output is always stored there.
3861    CODE is an rtx code such that (CODE OP0) is an rtx that describes
3862    the value that is stored into TARGET.  */
3863 
3864 void
3865 emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code)
3866 {
3867   bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3868   gcc_assert (ok);
3869 }
3870 
/* Communication block between emit_libcall_block_1 and its note_stores
   callback no_conflict_move_test.  */
struct no_conflict_data
{
  /* The value being computed by the libcall block; stores overlapping
     it must stay in the block.  */
  rtx target;
  /* FIRST is the first insn of the libcall sequence; INSN is the insn
     currently under examination.  */
  rtx_insn *first, *insn;
  /* Output flag: set when the examined store/clobber has to stay.  */
  bool must_stay;
};
3877 
/* Called via note_stores by emit_libcall_block.  Set P->must_stay if
   the currently examined clobber / store has to stay in the list of
   insns that constitute the actual libcall block.  DEST is the store
   destination, SET the store (or a CLOBBER), P0 a no_conflict_data.  */
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
  struct no_conflict_data *p= (struct no_conflict_data *) p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}
3911 
3912 
3913 /* Emit code to make a call to a constant function or a library call.
3914 
3915    INSNS is a list containing all insns emitted in the call.
3916    These insns leave the result in RESULT.  Our block is to copy RESULT
3917    to TARGET, which is logically equivalent to EQUIV.
3918 
3919    We first emit any insns that set a pseudo on the assumption that these are
3920    loading constants into registers; doing so allows them to be safely cse'ed
3921    between blocks.  Then we emit all the other insns in the block, followed by
3922    an insn to move RESULT to TARGET.  This last insn will have a REQ_EQUAL
3923    note with an operand of EQUIV.  */
3924 
3925 static void
3926 emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv,
3927 		      bool equiv_may_trap)
3928 {
3929   rtx final_dest = target;
3930   rtx_insn *next, *last, *insn;
3931 
3932   /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3933      into a MEM later.  Protect the libcall block from this change.  */
3934   if (! REG_P (target) || REG_USERVAR_P (target))
3935     target = gen_reg_rtx (GET_MODE (target));
3936 
3937   /* If we're using non-call exceptions, a libcall corresponding to an
3938      operation that may trap may also trap.  */
3939   /* ??? See the comment in front of make_reg_eh_region_note.  */
3940   if (cfun->can_throw_non_call_exceptions
3941       && (equiv_may_trap || may_trap_p (equiv)))
3942     {
3943       for (insn = insns; insn; insn = NEXT_INSN (insn))
3944 	if (CALL_P (insn))
3945 	  {
3946 	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3947 	    if (note)
3948 	      {
3949 		int lp_nr = INTVAL (XEXP (note, 0));
3950 		if (lp_nr == 0 || lp_nr == INT_MIN)
3951 		  remove_note (insn, note);
3952 	      }
3953 	  }
3954     }
3955   else
3956     {
3957       /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3958 	 reg note to indicate that this call cannot throw or execute a nonlocal
3959 	 goto (unless there is already a REG_EH_REGION note, in which case
3960 	 we update it).  */
3961       for (insn = insns; insn; insn = NEXT_INSN (insn))
3962 	if (CALL_P (insn))
3963 	  make_reg_eh_region_note_nothrow_nononlocal (insn);
3964     }
3965 
3966   /* First emit all insns that set pseudos.  Remove them from the list as
3967      we go.  Avoid insns that set pseudos which were referenced in previous
3968      insns.  These can be generated by move_by_pieces, for example,
3969      to update an address.  Similarly, avoid insns that reference things
3970      set in previous insns.  */
3971 
3972   for (insn = insns; insn; insn = next)
3973     {
3974       rtx set = single_set (insn);
3975 
3976       next = NEXT_INSN (insn);
3977 
3978       if (set != 0 && REG_P (SET_DEST (set))
3979 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3980 	{
3981 	  struct no_conflict_data data;
3982 
3983 	  data.target = const0_rtx;
3984 	  data.first = insns;
3985 	  data.insn = insn;
3986 	  data.must_stay = 0;
3987 	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
3988 	  if (! data.must_stay)
3989 	    {
3990 	      if (PREV_INSN (insn))
3991 		SET_NEXT_INSN (PREV_INSN (insn)) = next;
3992 	      else
3993 		insns = next;
3994 
3995 	      if (next)
3996 		SET_PREV_INSN (next) = PREV_INSN (insn);
3997 
3998 	      add_insn (insn);
3999 	    }
4000 	}
4001 
4002       /* Some ports use a loop to copy large arguments onto the stack.
4003 	 Don't move anything outside such a loop.  */
4004       if (LABEL_P (insn))
4005 	break;
4006     }
4007 
4008   /* Write the remaining insns followed by the final copy.  */
4009   for (insn = insns; insn; insn = next)
4010     {
4011       next = NEXT_INSN (insn);
4012 
4013       add_insn (insn);
4014     }
4015 
4016   last = emit_move_insn (target, result);
4017   set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target);
4018 
4019   if (final_dest != target)
4020     emit_move_insn (final_dest, target);
4021 }
4022 
/* Public wrapper for emit_libcall_block_1 for callers that do not need
   to mark EQUIV as possibly trapping; see the comment before
   emit_libcall_block_1 for the full contract.  */

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  emit_libcall_block_1 (safe_as_a <rtx_insn *> (insns),
			target, result, equiv, false);
}
4029 
4030 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
4031    PURPOSE describes how this comparison will be used.  CODE is the rtx
4032    comparison code we will be using.
4033 
4034    ??? Actually, CODE is slightly weaker than that.  A target is still
4035    required to implement all of the normal bcc operations, but not
4036    required to implement all (or any) of the unordered bcc operations.  */
4037 
4038 int
4039 can_compare_p (enum rtx_code code, machine_mode mode,
4040 	       enum can_compare_purpose purpose)
4041 {
4042   rtx test;
4043   test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
4044   do
4045     {
4046       enum insn_code icode;
4047 
4048       if (purpose == ccp_jump
4049           && (icode = optab_handler (cbranch_optab, mode)) != CODE_FOR_nothing
4050           && insn_operand_matches (icode, 0, test))
4051         return 1;
4052       if (purpose == ccp_store_flag
4053           && (icode = optab_handler (cstore_optab, mode)) != CODE_FOR_nothing
4054           && insn_operand_matches (icode, 1, test))
4055         return 1;
4056       if (purpose == ccp_cmov
4057 	  && optab_handler (cmov_optab, mode) != CODE_FOR_nothing)
4058 	return 1;
4059 
4060       mode = GET_MODE_WIDER_MODE (mode);
4061       PUT_MODE (test, mode);
4062     }
4063   while (mode != VOIDmode);
4064 
4065   return 0;
4066 }
4067 
/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened (as given by METHODS).

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  On failure *PTEST is set
   to NULL_RTX.  */

static void
prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
		  int unsignedp, enum optab_methods methods,
		  rtx *ptest, machine_mode *pmode)
{
  machine_mode mode = *pmode;
  rtx libfunc, test;
  machine_mode cmp_mode;
  enum mode_class mclass;

  /* The other methods are not needed.  */
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
	      || methods == OPTAB_LIB_WIDEN);

  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, COMPARE, 0, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1)))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && (rtx_cost (y, COMPARE, 1, optimize_insn_for_speed_p ())
          > COSTS_N_INSNS (1)))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);
  if (mode == VOIDmode)
    mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      machine_mode result_mode;
      enum insn_code cmp_code;
      tree length_type;
      /* NOTE(review): this LIBFUNC shadows the function-scope declaration
	 above; harmless, since the outer one is only assigned later.  */
      rtx libfunc;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	{
	  cmp_code = direct_optab_handler (cmpmem_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstr_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = direct_optab_handler (cmpstrn_optab, cmp_mode);
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((CONST_INT_P (size)
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  /* The comparison to hand back is RESULT <comparison> 0.  */
          *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx);
          *pmode = result_mode;
	  return;
	}

      if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN)
	goto fail;

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE,
					result_mode, 3,
					XEXP (x, 0), Pmode,
					XEXP (y, 0), Pmode,
					size, cmp_mode);
      /* Fall through to compare the memcmp result against zero.  */
      x = result;
      y = const0_rtx;
      mode = result_mode;
      methods = OPTAB_LIB_WIDEN;
      unsignedp = false;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (cfun->can_throw_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = force_reg (mode, x);
      if (may_trap_p (y))
	y = force_reg (mode, y);
    }

  /* MODE_CC comparisons go through the CCmode cbranch pattern, which
     the target is required to provide.  */
  if (GET_MODE_CLASS (mode) == MODE_CC)
    {
      enum insn_code icode = optab_handler (cbranch_optab, CCmode);
      test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
      gcc_assert (icode != CODE_FOR_nothing
                  && insn_operand_matches (icode, 0, test));
      *ptest = test;
      return;
    }

  /* Search for a cbranch pattern, widening the comparison mode as
     permitted by METHODS.  */
  mclass = GET_MODE_CLASS (mode);
  test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y);
  cmp_mode = mode;
  do
   {
      enum insn_code icode;
      icode = optab_handler (cbranch_optab, cmp_mode);
      if (icode != CODE_FOR_nothing
	  && insn_operand_matches (icode, 0, test))
	{
	  rtx_insn *last = get_last_insn ();
	  rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp);
	  rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp);
	  if (op0 && op1
	      && insn_operand_matches (icode, 1, op0)
	      && insn_operand_matches (icode, 2, op1))
	    {
	      XEXP (test, 0) = op0;
	      XEXP (test, 1) = op1;
	      *ptest = test;
	      *pmode = cmp_mode;
	      return;
	    }
	  /* Operand preparation may have emitted conversions; discard
	     them before trying a wider mode.  */
	  delete_insns_since (last);
	}

      if (methods == OPTAB_DIRECT || !CLASS_HAS_WIDER_MODES_P (mclass))
	break;
      cmp_mode = GET_MODE_WIDER_MODE (cmp_mode);
    }
  while (cmp_mode != VOIDmode);

  if (methods != OPTAB_LIB_WIDEN)
    goto fail;

  if (!SCALAR_FLOAT_MODE_P (mode))
    {
      rtx result;
      machine_mode ret_mode;

      /* Handle a libcall just for the mode we are using.  */
      libfunc = optab_libfunc (cmp_optab, mode);
      gcc_assert (libfunc);

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp)
	{
	  rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
	  if (ulibfunc)
	    libfunc = ulibfunc;
	}

      ret_mode = targetm.libgcc_cmp_return_mode ();
      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					ret_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines. Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1. Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison. For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case. For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1. This gives us a way to
	 represent LTU.
	 The comparisons in the fixed-point helper library are always
	 biased.  */
      x = result;
      y = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode))
	{
	  if (unsignedp)
	    x = plus_constant (ret_mode, result, 1);
	  else
	    y = const0_rtx;
	}

      /* Recurse to compare the libcall result; ret_mode is an integer
	 mode, so this cannot loop back here.  */
      *pmode = ret_mode;
      prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods,
			ptest, pmode);
    }
  else
    prepare_float_lib_cmp (x, y, comparison, ptest, pmode);

  return;

 fail:
  *ptest = NULL_RTX;
}
4301 
4302 /* Before emitting an insn with code ICODE, make sure that X, which is going
4303    to be used for operand OPNUM of the insn, is converted from mode MODE to
4304    WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4305    that it is accepted by the operand predicate.  Return the new value.  */
4306 
4307 rtx
4308 prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode,
4309 		 machine_mode wider_mode, int unsignedp)
4310 {
4311   if (mode != wider_mode)
4312     x = convert_modes (wider_mode, mode, x, unsignedp);
4313 
4314   if (!insn_operand_matches (icode, opnum, x))
4315     {
4316       machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode;
4317       if (reload_completed)
4318 	return NULL_RTX;
4319       if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode)
4320 	return NULL_RTX;
4321       x = copy_to_mode_reg (op_mode, x);
4322     }
4323 
4324   return x;
4325 }
4326 
4327 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4328    we can do the branch.  */
4329 
4330 static void
4331 emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label, int prob)
4332 {
4333   machine_mode optab_mode;
4334   enum mode_class mclass;
4335   enum insn_code icode;
4336   rtx_insn *insn;
4337 
4338   mclass = GET_MODE_CLASS (mode);
4339   optab_mode = (mclass == MODE_CC) ? CCmode : mode;
4340   icode = optab_handler (cbranch_optab, optab_mode);
4341 
4342   gcc_assert (icode != CODE_FOR_nothing);
4343   gcc_assert (insn_operand_matches (icode, 0, test));
4344   insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0),
4345                                           XEXP (test, 1), label));
4346   if (prob != -1
4347       && profile_status_for_fn (cfun) != PROFILE_ABSENT
4348       && insn
4349       && JUMP_P (insn)
4350       && any_condjump_p (insn)
4351       && !find_reg_note (insn, REG_BR_PROB, 0))
4352     add_int_reg_note (insn, REG_BR_PROB, prob);
4353 }
4354 
4355 /* Generate code to compare X with Y so that the condition codes are
4356    set and to jump to LABEL if the condition is true.  If X is a
4357    constant and Y is not a constant, then the comparison is swapped to
4358    ensure that the comparison RTL has the canonical form.
4359 
4360    UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4361    need to be widened.  UNSIGNEDP is also used to select the proper
4362    branch condition code.
4363 
4364    If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4365 
4366    MODE is the mode of the inputs (in case they are const_int).
4367 
4368    COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4369    It will be potentially converted into an unsigned variant based on
4370    UNSIGNEDP to select a proper jump instruction.
4371 
4372    PROB is the probability of jumping to LABEL.  */
4373 
4374 void
4375 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4376 			 machine_mode mode, int unsignedp, rtx label,
4377                          int prob)
4378 {
4379   rtx op0 = x, op1 = y;
4380   rtx test;
4381 
4382   /* Swap operands and condition to ensure canonical RTL.  */
4383   if (swap_commutative_operands_p (x, y)
4384       && can_compare_p (swap_condition (comparison), mode, ccp_jump))
4385     {
4386       op0 = y, op1 = x;
4387       comparison = swap_condition (comparison);
4388     }
4389 
4390   /* If OP0 is still a constant, then both X and Y must be constants
4391      or the opposite comparison is not supported.  Force X into a register
4392      to create canonical RTL.  */
4393   if (CONSTANT_P (op0))
4394     op0 = force_reg (mode, op0);
4395 
4396   if (unsignedp)
4397     comparison = unsigned_condition (comparison);
4398 
4399   prepare_cmp_insn (op0, op1, comparison, size, unsignedp, OPTAB_LIB_WIDEN,
4400 		    &test, &mode);
4401   emit_cmp_and_jump_insn_1 (test, mode, label, prob);
4402 }
4403 
4404 
/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
   On return *PTEST holds the integer comparison to perform on the
   libcall result and *PMODE its mode.  */

static void
prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison,
		       rtx *ptest, machine_mode *pmode)
{
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  machine_mode orig_mode = GET_MODE (x);
  machine_mode mode, cmp_mode;
  rtx true_rtx, false_rtx;
  rtx value, target, equiv;
  rtx_insn *insns;
  rtx libfunc = 0;
  bool reversed_p = false;
  cmp_mode = targetm.libgcc_cmp_return_mode ();

  /* Search for a libfunc implementing COMPARISON, its swapped form, or
     its reversed form, widening the float mode until one is found.  */
  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if (code_to_optab (comparison)
	  && (libfunc = optab_libfunc (code_to_optab (comparison), mode)))
	break;

      if (code_to_optab (swapped)
	  && (libfunc = optab_libfunc (code_to_optab (swapped), mode)))
	{
	  /* Using the swapped condition requires swapping the operands.  */
	  rtx tmp;
	  tmp = x; x = y; y = tmp;
	  comparison = swapped;
	  break;
	}

      if (code_to_optab (reversed)
	  && (libfunc = optab_libfunc (code_to_optab (reversed), mode)))
	{
	  /* Remember that the sense of the result must be inverted.  */
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    {
      true_rtx = const_true_rtx;
      false_rtx = const0_rtx;
    }
  else
    {
      /* These are the conventional return values of the soft-float
	 comparison routines for each condition.  */
      switch (comparison)
        {
        case EQ:
          true_rtx = const0_rtx;
          false_rtx = const_true_rtx;
          break;

        case NE:
          true_rtx = const_true_rtx;
          false_rtx = const0_rtx;
          break;

        case GT:
          true_rtx = const1_rtx;
          false_rtx = const0_rtx;
          break;

        case GE:
          true_rtx = const0_rtx;
          false_rtx = constm1_rtx;
          break;

        case LT:
          true_rtx = constm1_rtx;
          false_rtx = const0_rtx;
          break;

        case LE:
          true_rtx = const0_rtx;
          false_rtx = const1_rtx;
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (comparison == UNORDERED)
    {
      /* UNORDERED (x, y) is equivalent to x != x || y != y.  */
      rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
        equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
                                      equiv, true_rtx, false_rtx);
    }

  /* Emit the libcall as a self-contained block annotated with EQUIV.  */
  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   cmp_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (cmp_mode);
  emit_libcall_block (insns, target, value, equiv);

  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)
      || reversed_p)
    *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx);
  else
    *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx);

  *pmode = cmp_mode;
}
4538 
/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc ATTRIBUTE_UNUSED)
{
#ifndef HAVE_indirect_jump
  /* Targets without an indirect_jump pattern cannot support computed
     gotos; issue a graceful "not implemented" diagnostic rather than
     crashing later in expansion.  */
  sorry ("indirect jumps are not available on this target");
#else
  struct expand_operand ops[1];
  create_address_operand (&ops[0], loc);
  expand_jump_insn (CODE_FOR_indirect_jump, 1, ops);
  /* Control never falls through an indirect jump, so terminate the
     basic block with a barrier.  */
  emit_barrier ();
#endif
}
4553 
4554 #ifdef HAVE_conditional_move
4555 
/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode cmode, rtx op2, rtx op3,
		       machine_mode mode, int unsignedp)
{
  rtx tem, comparison;
  rtx_insn *last;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  /* Canonicalize the value operands too: if OP2/OP3 would be swapped by
     the canonical operand order, swap them and invert the comparison —
     but only when the comparison is reversible (no FP traps etc.).  */
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = direct_optab_handler (movcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  /* Record the pending stack adjustment so it can be restored if the
     expansion below fails and all emitted insns are deleted.  */
  saved_pending_stack_adjust save;
  save_pending_stack_adjust (&save);
  last = get_last_insn ();
  do_pending_stack_adjust ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
		    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
		    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  /* The pattern may have placed the result elsewhere; copy it
	     back into TARGET.  */
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }
  /* Failed: remove any insns emitted above and undo the stack-adjust
     flush so the caller sees an unchanged state.  */
  delete_insns_since (last);
  restore_pending_stack_adjust (&save);
  return NULL_RTX;
}
4658 
4659 /* Return nonzero if a conditional move of mode MODE is supported.
4660 
4661    This function is for combine so it can tell whether an insn that looks
4662    like a conditional move is actually supported by the hardware.  If we
4663    guess wrong we lose a bit on optimization, but that's it.  */
4664 /* ??? sparc64 supports conditionally moving integers values based on fp
4665    comparisons, and vice versa.  How do we handle them?  */
4666 
4667 int
4668 can_conditionally_move_p (machine_mode mode)
4669 {
4670   if (direct_optab_handler (movcc_optab, mode) != CODE_FOR_nothing)
4671     return 1;
4672 
4673   return 0;
4674 }
4675 
4676 #endif /* HAVE_conditional_move */
4677 
/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
		      machine_mode cmode, rtx op2, rtx op3,
		      machine_mode mode, int unsignedp)
{
  rtx tem, comparison;
  rtx_insn *last;
  enum insn_code icode;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  /* Note: unlike emit_conditional_move we use addcc_optab, and OP2/OP3
     cannot be swapped to canonicalize operand order, since OP3 is the
     addend rather than a symmetric alternative value.  */
  icode = optab_handler (addcc_optab, mode);

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  code = unsignedp ? unsigned_condition (code) : code;
  comparison = simplify_gen_relational (code, VOIDmode, cmode, op0, op1);

  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (!COMPARISON_P (comparison))
    return NULL_RTX;

  do_pending_stack_adjust ();
  /* Remember where we are so any partially-emitted expansion can be
     deleted on failure.  */
  last = get_last_insn ();
  prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1),
                    GET_CODE (comparison), NULL_RTX, unsignedp, OPTAB_WIDEN,
                    &comparison, &cmode);
  if (comparison)
    {
      struct expand_operand ops[4];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], comparison);
      create_input_operand (&ops[2], op2, mode);
      create_input_operand (&ops[3], op3, mode);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  /* The pattern may have produced the result in a different
	     location; copy it back into TARGET.  */
	  if (ops[0].value != target)
	    convert_move (target, ops[0].value, false);
	  return target;
	}
    }
  delete_insns_since (last);
  return NULL_RTX;
}
4766 
4767 /* These functions attempt to generate an insn body, rather than
4768    emitting the insn, but if the gen function already emits them, we
4769    make no attempt to turn them back into naked patterns.  */
4770 
4771 /* Generate and return an insn body to add Y to X.  */
4772 
4773 rtx
4774 gen_add2_insn (rtx x, rtx y)
4775 {
4776   enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
4777 
4778   gcc_assert (insn_operand_matches (icode, 0, x));
4779   gcc_assert (insn_operand_matches (icode, 1, x));
4780   gcc_assert (insn_operand_matches (icode, 2, y));
4781 
4782   return GEN_FCN (icode) (x, x, y);
4783 }
4784 
4785 /* Generate and return an insn body to add r1 and c,
4786    storing the result in r0.  */
4787 
4788 rtx
4789 gen_add3_insn (rtx r0, rtx r1, rtx c)
4790 {
4791   enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
4792 
4793   if (icode == CODE_FOR_nothing
4794       || !insn_operand_matches (icode, 0, r0)
4795       || !insn_operand_matches (icode, 1, r1)
4796       || !insn_operand_matches (icode, 2, c))
4797     return NULL_RTX;
4798 
4799   return GEN_FCN (icode) (r0, r1, c);
4800 }
4801 
4802 int
4803 have_add2_insn (rtx x, rtx y)
4804 {
4805   enum insn_code icode;
4806 
4807   gcc_assert (GET_MODE (x) != VOIDmode);
4808 
4809   icode = optab_handler (add_optab, GET_MODE (x));
4810 
4811   if (icode == CODE_FOR_nothing)
4812     return 0;
4813 
4814   if (!insn_operand_matches (icode, 0, x)
4815       || !insn_operand_matches (icode, 1, x)
4816       || !insn_operand_matches (icode, 2, y))
4817     return 0;
4818 
4819   return 1;
4820 }
4821 
4822 /* Generate and return an insn body to add Y to X.  */
4823 
4824 rtx
4825 gen_addptr3_insn (rtx x, rtx y, rtx z)
4826 {
4827   enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
4828 
4829   gcc_assert (insn_operand_matches (icode, 0, x));
4830   gcc_assert (insn_operand_matches (icode, 1, y));
4831   gcc_assert (insn_operand_matches (icode, 2, z));
4832 
4833   return GEN_FCN (icode) (x, y, z);
4834 }
4835 
4836 /* Return true if the target implements an addptr pattern and X, Y,
4837    and Z are valid for the pattern predicates.  */
4838 
4839 int
4840 have_addptr3_insn (rtx x, rtx y, rtx z)
4841 {
4842   enum insn_code icode;
4843 
4844   gcc_assert (GET_MODE (x) != VOIDmode);
4845 
4846   icode = optab_handler (addptr3_optab, GET_MODE (x));
4847 
4848   if (icode == CODE_FOR_nothing)
4849     return 0;
4850 
4851   if (!insn_operand_matches (icode, 0, x)
4852       || !insn_operand_matches (icode, 1, y)
4853       || !insn_operand_matches (icode, 2, z))
4854     return 0;
4855 
4856   return 1;
4857 }
4858 
4859 /* Generate and return an insn body to subtract Y from X.  */
4860 
4861 rtx
4862 gen_sub2_insn (rtx x, rtx y)
4863 {
4864   enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
4865 
4866   gcc_assert (insn_operand_matches (icode, 0, x));
4867   gcc_assert (insn_operand_matches (icode, 1, x));
4868   gcc_assert (insn_operand_matches (icode, 2, y));
4869 
4870   return GEN_FCN (icode) (x, x, y);
4871 }
4872 
4873 /* Generate and return an insn body to subtract r1 and c,
4874    storing the result in r0.  */
4875 
4876 rtx
4877 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4878 {
4879   enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
4880 
4881   if (icode == CODE_FOR_nothing
4882       || !insn_operand_matches (icode, 0, r0)
4883       || !insn_operand_matches (icode, 1, r1)
4884       || !insn_operand_matches (icode, 2, c))
4885     return NULL_RTX;
4886 
4887   return GEN_FCN (icode) (r0, r1, c);
4888 }
4889 
4890 int
4891 have_sub2_insn (rtx x, rtx y)
4892 {
4893   enum insn_code icode;
4894 
4895   gcc_assert (GET_MODE (x) != VOIDmode);
4896 
4897   icode = optab_handler (sub_optab, GET_MODE (x));
4898 
4899   if (icode == CODE_FOR_nothing)
4900     return 0;
4901 
4902   if (!insn_operand_matches (icode, 0, x)
4903       || !insn_operand_matches (icode, 1, x)
4904       || !insn_operand_matches (icode, 2, y))
4905     return 0;
4906 
4907   return 1;
4908 }
4909 
4910 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4911    UNSIGNEDP specifies zero-extension instead of sign-extension.  If
4912    no such operation exists, CODE_FOR_nothing will be returned.  */
4913 
4914 enum insn_code
4915 can_extend_p (machine_mode to_mode, machine_mode from_mode,
4916 	      int unsignedp)
4917 {
4918   convert_optab tab;
4919 #ifdef HAVE_ptr_extend
4920   if (unsignedp < 0)
4921     return CODE_FOR_ptr_extend;
4922 #endif
4923 
4924   tab = unsignedp ? zext_optab : sext_optab;
4925   return convert_optab_handler (tab, to_mode, from_mode);
4926 }
4927 
4928 /* Generate the body of an insn to extend Y (with mode MFROM)
4929    into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */
4930 
4931 rtx
4932 gen_extend_insn (rtx x, rtx y, machine_mode mto,
4933 		 machine_mode mfrom, int unsignedp)
4934 {
4935   enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4936   return GEN_FCN (icode) (x, y);
4937 }
4938 
4939 /* can_fix_p and can_float_p say whether the target machine
4940    can directly convert a given fixed point type to
4941    a given floating point type, or vice versa.
4942    The returned value is the CODE_FOR_... value to use,
4943    or CODE_FOR_nothing if these modes cannot be directly converted.
4944 
4945    *TRUNCP_PTR is set to 1 if it is necessary to output
4946    an explicit FTRUNC insn before the fix insn; otherwise 0.  */
4947 
4948 static enum insn_code
4949 can_fix_p (machine_mode fixmode, machine_mode fltmode,
4950 	   int unsignedp, int *truncp_ptr)
4951 {
4952   convert_optab tab;
4953   enum insn_code icode;
4954 
4955   tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4956   icode = convert_optab_handler (tab, fixmode, fltmode);
4957   if (icode != CODE_FOR_nothing)
4958     {
4959       *truncp_ptr = 0;
4960       return icode;
4961     }
4962 
4963   /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4964      for this to work. We need to rework the fix* and ftrunc* patterns
4965      and documentation.  */
4966   tab = unsignedp ? ufix_optab : sfix_optab;
4967   icode = convert_optab_handler (tab, fixmode, fltmode);
4968   if (icode != CODE_FOR_nothing
4969       && optab_handler (ftrunc_optab, fltmode) != CODE_FOR_nothing)
4970     {
4971       *truncp_ptr = 1;
4972       return icode;
4973     }
4974 
4975   *truncp_ptr = 0;
4976   return CODE_FOR_nothing;
4977 }
4978 
4979 enum insn_code
4980 can_float_p (machine_mode fltmode, machine_mode fixmode,
4981 	     int unsignedp)
4982 {
4983   convert_optab tab;
4984 
4985   tab = unsignedp ? ufloat_optab : sfloat_optab;
4986   return convert_optab_handler (tab, fltmode, fixmode);
4987 }
4988 
4989 /* Function supportable_convert_operation
4990 
4991    Check whether an operation represented by the code CODE is a
4992    convert operation that is supported by the target platform in
4993    vector form (i.e., when operating on arguments of type VECTYPE_IN
4994    producing a result of type VECTYPE_OUT).
4995 
4996    Convert operations we currently support directly are FIX_TRUNC and FLOAT.
4997    This function checks if these operations are supported
4998    by the target platform either directly (via vector tree-codes), or via
4999    target builtins.
5000 
5001    Output:
5002    - CODE1 is code of vector operation to be used when
5003    vectorizing the operation, if available.
5004    - DECL is decl of target builtin functions to be used
5005    when vectorizing the operation, if available.  In this case,
5006    CODE1 is CALL_EXPR.  */
5007 
5008 bool
5009 supportable_convert_operation (enum tree_code code,
5010                                     tree vectype_out, tree vectype_in,
5011                                     tree *decl, enum tree_code *code1)
5012 {
5013   machine_mode m1,m2;
5014   int truncp;
5015 
5016   m1 = TYPE_MODE (vectype_out);
5017   m2 = TYPE_MODE (vectype_in);
5018 
5019   /* First check if we can done conversion directly.  */
5020   if ((code == FIX_TRUNC_EXPR
5021        && can_fix_p (m1,m2,TYPE_UNSIGNED (vectype_out), &truncp)
5022           != CODE_FOR_nothing)
5023       || (code == FLOAT_EXPR
5024           && can_float_p (m1,m2,TYPE_UNSIGNED (vectype_in))
5025 	     != CODE_FOR_nothing))
5026     {
5027       *code1 = code;
5028       return true;
5029     }
5030 
5031   /* Now check for builtin.  */
5032   if (targetm.vectorize.builtin_conversion
5033       && targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
5034     {
5035       *code1 = CALL_EXPR;
5036       *decl = targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in);
5037       return true;
5038     }
5039   return false;
5040 }
5041 
5042 
/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  /* Set when a signed int->float pattern exists; enables the unsigned
     fix-up path below.  */
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	/* A wider float mode is only usable if its significand can hold
	   every value of FROM exactly.  */
	if (fmode != GET_MODE (to)
	    && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
	  continue;

	icode = can_float_p (fmode, imode, unsignedp);
	if (icode == CODE_FOR_nothing && unsignedp)
	  {
	    enum insn_code scode = can_float_p (fmode, imode, 0);
	    if (scode != CODE_FOR_nothing)
	      can_do_signed = true;
	    /* A signed conversion of a wider integer mode is safe for
	       unsigned input: after widening, the value cannot appear
	       negative.  */
	    if (imode != GET_MODE (from))
	      icode = scode, doing_unsigned = 0;
	  }

	if (icode != CODE_FOR_nothing)
	  {
	    if (imode != GET_MODE (from))
	      from = convert_to_mode (imode, from, unsignedp);

	    if (fmode != GET_MODE (to))
	      target = gen_reg_rtx (fmode);

	    emit_unop_insn (icode, target, from,
			    doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

	    if (target != to)
	      convert_move (to, target, 0);
	    return;
	  }
      }

  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then unconditionally adjust the result.  */
  if (unsignedp && can_do_signed)
    {
      rtx_code_label *label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
	 least as wide as the target.  Using FMODE will avoid rounding woes
	 with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to);  fmode != VOIDmode;
	   fmode = GET_MODE_WIDER_MODE (fmode))
	if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
	    && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
	  break;

      if (fmode == VOIDmode)
	{
	  /* There is no such mode.  Pretend the target is wide enough.  */
	  fmode = GET_MODE (to);

	  /* Avoid double-rounding when TO is narrower than FROM.  */
	  if ((significand_size (fmode) + 1)
	      < GET_MODE_PRECISION (GET_MODE (from)))
	    {
	      rtx temp1;
	      rtx_code_label *neglabel = gen_label_rtx ();

	      /* Don't use TARGET if it isn't a register, is a hard register,
		 or is the wrong mode.  */
	      if (!REG_P (target)
		  || REGNO (target) < FIRST_PSEUDO_REGISTER
		  || GET_MODE (target) != fmode)
		target = gen_reg_rtx (fmode);

	      imode = GET_MODE (from);
	      do_pending_stack_adjust ();

	      /* Test whether the sign bit is set.  */
	      emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
				       0, neglabel);

	      /* The sign bit is not set.  Convert as signed.  */
	      expand_float (target, from, 0);
	      emit_jump_insn (gen_jump (label));
	      emit_barrier ();

	      /* The sign bit is set.
		 Convert to a usable (positive signed) value by shifting right
		 one bit, while remembering if a nonzero bit was shifted
		 out; i.e., compute  (from & 1) | (from >> 1).  */

	      emit_label (neglabel);
	      temp = expand_binop (imode, and_optab, from, const1_rtx,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1);
	      temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
				   OPTAB_LIB_WIDEN);
	      expand_float (target, temp, 0);

	      /* Multiply by 2 to undo the shift above.  */
	      temp = expand_binop (fmode, add_optab, target, target,
				   target, 0, OPTAB_LIB_WIDEN);
	      if (temp != target)
		emit_move_insn (target, temp);

	      do_pending_stack_adjust ();
	      emit_label (label);
	      goto done;
	    }
	}

      /* If we are about to do some arithmetic to correct for an
	 unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
	  || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
	target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
	 correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
			       0, label);


      /* OFFSET is 2**(bitwidth of FROM) as an FMODE constant.  */
      real_2expN (&offset, GET_MODE_PRECISION (GET_MODE (from)), fmode);
      temp = expand_binop (fmode, add_optab, target,
			   CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
			   target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
	emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
    {
      rtx libfunc;
      rtx_insn *insns;
      rtx value;
      convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;

      /* Library conversion routines take at least an SImode argument;
	 widen narrower sources first.  */
      if (GET_MODE_PRECISION (GET_MODE (from)) < GET_MODE_PRECISION (SImode))
	from = convert_to_mode (SImode, from, unsignedp);

      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
					 GET_MODE (to), from));
    }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
	emit_move_insn (to, target);
      else
	convert_move (to, target, 0);
    }
}
5246 
/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  UNSIGNEDP nonzero means the result is
   regarded as unsigned.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;
  /* Nonzero when an explicit FTRUNC must precede the fix insn
     (see can_fix_p).  */
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	int doing_unsigned = unsignedp;

	icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
	/* A signed fix into a wider integer mode is also correct for an
	   unsigned result, since the value then fits in the signed
	   range.  */
	if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
	  icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

	if (icode != CODE_FOR_nothing)
	  {
	    rtx_insn *last = get_last_insn ();
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    if (must_trunc)
	      {
		rtx temp = gen_reg_rtx (GET_MODE (from));
		from = expand_unop (GET_MODE (from), ftrunc_optab, from,
				    temp, 0);
	      }

	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (maybe_emit_unop_insn (icode, target, from,
				      doing_unsigned ? UNSIGNED_FIX : FIX))
	      {
		if (target != to)
		  convert_move (to, target, unsignedp);
		return;
	      }
	    /* The expansion failed; discard any partially-emitted
	       insns and keep searching wider modes.  */
	    delete_insns_since (last);
	  }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is needed for decimal float modes which cannot accurately
     represent one plus the highest signed number of the same size, but
     not for binary modes.  Consider, for instance conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there is no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (as for other input overflow happens and result is undefined)
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_PRECISION (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
	 fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
	  && (!DECIMAL_FLOAT_MODE_P (fmode)
	      || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (GET_MODE (to))))
	{
	  int bitsize;
	  REAL_VALUE_TYPE offset;
	  rtx limit;
	  rtx_code_label *lab1, *lab2;
	  rtx_insn *insn;

	  /* LIMIT is 2**(N-1), the smallest value not representable as
	     a signed N-bit integer.  */
	  bitsize = GET_MODE_PRECISION (GET_MODE (to));
	  real_2expN (&offset, bitsize - 1, fmode);
	  limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
	  lab1 = gen_label_rtx ();
	  lab2 = gen_label_rtx ();

	  if (fmode != GET_MODE (from))
	    from = convert_to_mode (fmode, from, 0);

	  /* See if we need to do the subtraction.  */
	  do_pending_stack_adjust ();
	  emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
				   0, lab1);

	  /* If not, do the signed "fix" and branch around fixup code.  */
	  expand_fix (to, from, 0);
	  emit_jump_insn (gen_jump (lab2));
	  emit_barrier ();

	  /* Otherwise, subtract 2**(N-1), convert to signed number,
	     then add 2**(N-1).  Do the addition using XOR since this
	     will often generate better code.  */
	  emit_label (lab1);
	  target = expand_binop (GET_MODE (from), sub_optab, from, limit,
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  expand_fix (to, target, 0);
	  target = expand_binop (GET_MODE (to), xor_optab, to,
				 gen_int_mode
				 ((HOST_WIDE_INT) 1 << (bitsize - 1),
				  GET_MODE (to)),
				 to, 1, OPTAB_LIB_WIDEN);

	  if (target != to)
	    emit_move_insn (to, target);

	  emit_label (lab2);

	  if (optab_handler (mov_optab, GET_MODE (to)) != CODE_FOR_nothing)
	    {
	      /* Make a place for a REG_NOTE and add it.  */
	      insn = emit_move_insn (to, to);
	      set_dst_reg_note (insn, REG_EQUAL,
				gen_rtx_fmt_e (UNSIGNED_FIX, GET_MODE (to),
					       copy_rtx (from)),
				to);
	    }

	  return;
	}

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_PRECISION (GET_MODE (to)) < GET_MODE_PRECISION (SImode))
    {
      target = gen_reg_rtx (SImode);

      /* Recurse: convert into the SImode temporary, then narrow at the
	 end of the function.  */
      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx_insn *insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				       GET_MODE (to), 1, from,
				       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
			  gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
					 GET_MODE (to), from));
    }

  /* Copy the result to TO if we computed it in a temporary.  */
  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}
5429 
5430 /* Generate code to convert FROM or TO a fixed-point.
5431    If UINTP is true, either TO or FROM is an unsigned integer.
5432    If SATP is true, we need to saturate the result.  */
5433 
void
expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);
  convert_optab tab;
  enum rtx_code this_code;
  enum insn_code code;
  rtx_insn *insns;
  rtx value;
  rtx libfunc;

  /* Identical modes: a plain move is all that is needed.  */
  if (to_mode == from_mode)
    {
      emit_move_insn (to, from);
      return;
    }

  /* Select the conversion optab and the rtx code recorded in the
     REG_EQUAL note, based on signedness and saturation.  */
  if (uintp)
    {
      tab = satp ? satfractuns_optab : fractuns_optab;
      this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
    }
  else
    {
      tab = satp ? satfract_optab : fract_optab;
      this_code = satp ? SAT_FRACT : FRACT_CONVERT;
    }
  /* Prefer a direct conversion instruction if the target has one.  */
  code = convert_optab_handler (tab, to_mode, from_mode);
  if (code != CODE_FOR_nothing)
    {
      emit_unop_insn (code, to, from, this_code);
      return;
    }

  /* No insn: fall back to the library routine registered for this
     (optab, to_mode, from_mode) triple; it must exist.  */
  libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
  gcc_assert (libfunc);

  /* Emit the call in its own sequence so emit_libcall_block can attach
     an equivalence note describing the conversion.  */
  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
				   1, from, from_mode);
  insns = get_insns ();
  end_sequence ();

  emit_libcall_block (insns, to, value,
		      gen_rtx_fmt_e (optab_to_code (tab), to_mode, from));
}
5481 
5482 /* Generate code to convert FROM to fixed point and store in TO.  FROM
5483    must be floating point, TO must be signed.  Use the conversion optab
5484    TAB to do the conversion.  */
5485 
bool
expand_sfix_optab (rtx to, rtx from, convert_optab tab)
{
  enum insn_code icode;
  rtx target = to;
  machine_mode fmode, imode;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
	 imode = GET_MODE_WIDER_MODE (imode))
      {
	icode = convert_optab_handler (tab, imode, fmode);
	if (icode != CODE_FOR_nothing)
	  {
	    /* Remember where we are so any partially-emitted RTL can be
	       discarded if the insn expansion fails below.  */
	    rtx_insn *last = get_last_insn ();
	    /* Widen the source to FMODE if we had to look wider.  */
	    if (fmode != GET_MODE (from))
	      from = convert_to_mode (fmode, from, 0);

	    /* Convert into a fresh register when the insn produces a
	       wider integer mode than TO.  */
	    if (imode != GET_MODE (to))
	      target = gen_reg_rtx (imode);

	    if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
	      {
	        /* Roll back and keep searching wider mode pairs.  */
	        delete_insns_since (last);
		continue;
	      }
	    /* Move the (possibly wider) result into TO.  */
	    if (target != to)
	      convert_move (to, target, 0);
	    return true;
	  }
      }

  /* No suitable open-coded conversion exists; caller must fall back.  */
  return false;
}
5526 
5527 /* Report whether we have an instruction to perform the operation
5528    specified by CODE on operands of mode MODE.  */
5529 int
5530 have_insn_for (enum rtx_code code, machine_mode mode)
5531 {
5532   return (code_to_optab (code)
5533 	  && (optab_handler (code_to_optab (code), mode)
5534 	      != CODE_FOR_nothing));
5535 }
5536 
5537 /* Initialize the libfunc fields of an entire group of entries in some
5538    optab.  Each entry is set equal to a string consisting of a leading
5539    pair of underscores followed by a generic operation name followed by
5540    a mode name (downshifted to lowercase) followed by a single character
5541    representing the number of operands for the given operation (which is
5542    usually one of the characters '2', '3', or '4').
5543 
5544    OPTABLE is the table in which libfunc fields are to be initialized.
5545    OPNAME is the generic (string) name of the operation.
5546    SUFFIX is the character which specifies the number of operands for
5547      the given generic operation.
5548    MODE is the mode to generate for.
5549 */
5550 
5551 static void
5552 gen_libfunc (optab optable, const char *opname, int suffix,
5553 	     machine_mode mode)
5554 {
5555   unsigned opname_len = strlen (opname);
5556   const char *mname = GET_MODE_NAME (mode);
5557   unsigned mname_len = strlen (mname);
5558   int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5559   int len = prefix_len + opname_len + mname_len + 1 + 1;
5560   char *libfunc_name = XALLOCAVEC (char, len);
5561   char *p;
5562   const char *q;
5563 
5564   p = libfunc_name;
5565   *p++ = '_';
5566   *p++ = '_';
5567   if (targetm.libfunc_gnu_prefix)
5568     {
5569       *p++ = 'g';
5570       *p++ = 'n';
5571       *p++ = 'u';
5572       *p++ = '_';
5573     }
5574   for (q = opname; *q; )
5575     *p++ = *q++;
5576   for (q = mname; *q; q++)
5577     *p++ = TOLOWER (*q);
5578   *p++ = suffix;
5579   *p = '\0';
5580 
5581   set_optab_libfunc (optable, mode,
5582 		     ggc_alloc_string (libfunc_name, p - libfunc_name));
5583 }
5584 
5585 /* Like gen_libfunc, but verify that integer operation is involved.  */
5586 
5587 void
5588 gen_int_libfunc (optab optable, const char *opname, char suffix,
5589 		 machine_mode mode)
5590 {
5591   int maxsize = 2 * BITS_PER_WORD;
5592   int minsize = BITS_PER_WORD;
5593 
5594   if (GET_MODE_CLASS (mode) != MODE_INT)
5595     return;
5596   if (maxsize < LONG_LONG_TYPE_SIZE)
5597     maxsize = LONG_LONG_TYPE_SIZE;
5598   if (minsize > INT_TYPE_SIZE
5599       && (trapv_binoptab_p (optable)
5600 	  || trapv_unoptab_p (optable)))
5601     minsize = INT_TYPE_SIZE;
5602   if (GET_MODE_BITSIZE (mode) < minsize
5603       || GET_MODE_BITSIZE (mode) > maxsize)
5604     return;
5605   gen_libfunc (optable, opname, suffix, mode);
5606 }
5607 
5608 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed.  */
5609 
5610 void
5611 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5612 		machine_mode mode)
5613 {
5614   char *dec_opname;
5615 
5616   if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5617     gen_libfunc (optable, opname, suffix, mode);
5618   if (DECIMAL_FLOAT_MODE_P (mode))
5619     {
5620       dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5621       /* For BID support, change the name to have either a bid_ or dpd_ prefix
5622 	 depending on the low level floating format used.  */
5623       memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5624       strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5625       gen_libfunc (optable, dec_opname, suffix, mode);
5626     }
5627 }
5628 
5629 /* Like gen_libfunc, but verify that fixed-point operation is involved.  */
5630 
5631 void
5632 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5633 		   machine_mode mode)
5634 {
5635   if (!ALL_FIXED_POINT_MODE_P (mode))
5636     return;
5637   gen_libfunc (optable, opname, suffix, mode);
5638 }
5639 
5640 /* Like gen_libfunc, but verify that signed fixed-point operation is
5641    involved.  */
5642 
5643 void
5644 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5645 			  machine_mode mode)
5646 {
5647   if (!SIGNED_FIXED_POINT_MODE_P (mode))
5648     return;
5649   gen_libfunc (optable, opname, suffix, mode);
5650 }
5651 
5652 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5653    involved.  */
5654 
5655 void
5656 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5657 			    machine_mode mode)
5658 {
5659   if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5660     return;
5661   gen_libfunc (optable, opname, suffix, mode);
5662 }
5663 
5664 /* Like gen_libfunc, but verify that FP or INT operation is involved.  */
5665 
5666 void
5667 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5668 		    machine_mode mode)
5669 {
5670   if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5671     gen_fp_libfunc (optable, name, suffix, mode);
5672   if (INTEGRAL_MODE_P (mode))
5673     gen_int_libfunc (optable, name, suffix, mode);
5674 }
5675 
5676 /* Like gen_libfunc, but verify that FP or INT operation is involved
5677    and add 'v' suffix for integer operation.  */
5678 
5679 void
5680 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5681 		     machine_mode mode)
5682 {
5683   if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5684     gen_fp_libfunc (optable, name, suffix, mode);
5685   if (GET_MODE_CLASS (mode) == MODE_INT)
5686     {
5687       int len = strlen (name);
5688       char *v_name = XALLOCAVEC (char, len + 2);
5689       strcpy (v_name, name);
5690       v_name[len] = 'v';
5691       v_name[len + 1] = 0;
5692       gen_int_libfunc (optable, v_name, suffix, mode);
5693     }
5694 }
5695 
5696 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5697    involved.  */
5698 
5699 void
5700 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5701 			  machine_mode mode)
5702 {
5703   if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5704     gen_fp_libfunc (optable, name, suffix, mode);
5705   if (INTEGRAL_MODE_P (mode))
5706     gen_int_libfunc (optable, name, suffix, mode);
5707   if (ALL_FIXED_POINT_MODE_P (mode))
5708     gen_fixed_libfunc (optable, name, suffix, mode);
5709 }
5710 
5711 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5712    involved.  */
5713 
5714 void
5715 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5716 				 machine_mode mode)
5717 {
5718   if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5719     gen_fp_libfunc (optable, name, suffix, mode);
5720   if (INTEGRAL_MODE_P (mode))
5721     gen_int_libfunc (optable, name, suffix, mode);
5722   if (SIGNED_FIXED_POINT_MODE_P (mode))
5723     gen_signed_fixed_libfunc (optable, name, suffix, mode);
5724 }
5725 
5726 /* Like gen_libfunc, but verify that INT or FIXED operation is
5727    involved.  */
5728 
5729 void
5730 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5731 		       machine_mode mode)
5732 {
5733   if (INTEGRAL_MODE_P (mode))
5734     gen_int_libfunc (optable, name, suffix, mode);
5735   if (ALL_FIXED_POINT_MODE_P (mode))
5736     gen_fixed_libfunc (optable, name, suffix, mode);
5737 }
5738 
5739 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5740    involved.  */
5741 
5742 void
5743 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5744 			      machine_mode mode)
5745 {
5746   if (INTEGRAL_MODE_P (mode))
5747     gen_int_libfunc (optable, name, suffix, mode);
5748   if (SIGNED_FIXED_POINT_MODE_P (mode))
5749     gen_signed_fixed_libfunc (optable, name, suffix, mode);
5750 }
5751 
5752 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5753    involved.  */
5754 
5755 void
5756 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5757 				machine_mode mode)
5758 {
5759   if (INTEGRAL_MODE_P (mode))
5760     gen_int_libfunc (optable, name, suffix, mode);
5761   if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5762     gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5763 }
5764 
5765 /* Initialize the libfunc fields of an entire group of entries of an
5766    inter-mode-class conversion optab.  The string formation rules are
5767    similar to the ones for init_libfuncs, above, but instead of having
5768    a mode name and an operand count these functions have two mode names
5769    and no operand count.  */
5770 
void
gen_interclass_conv_libfunc (convert_optab tab,
			     const char *opname,
			     machine_mode tmode,
			     machine_mode fmode)
{
  size_t opname_len = strlen (opname);
  size_t mname_len = 0;

  const char *fname, *tname;
  const char *q;
  int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
  char *libfunc_name, *suffix;
  char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
  char *p;

  /* If this is a decimal conversion, add the current BID vs. DPD prefix that
     depends on which underlying decimal floating point format is used.  */
  const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;

  mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));

  /* Candidate name for the non-decimal case: "__" or "__gnu_" prefix,
     operation name, then room for the two mode names and the NUL.  */
  nondec_name = XALLOCAVEC (char, prefix_len + opname_len + mname_len + 1 + 1);
  nondec_name[0] = '_';
  nondec_name[1] = '_';
  if (targetm.libfunc_gnu_prefix)
    {
      nondec_name[2] = 'g';
      nondec_name[3] = 'n';
      nondec_name[4] = 'u';
      nondec_name[5] = '_';
    }

  memcpy (&nondec_name[prefix_len], opname, opname_len);
  nondec_suffix = nondec_name + opname_len + prefix_len;

  /* Candidate name for the decimal case: "__" plus the "bid_"/"dpd_"
     prefix instead of "__gnu_".  */
  dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
  dec_name[0] = '_';
  dec_name[1] = '_';
  memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
  memcpy (&dec_name[2+dec_len], opname, opname_len);
  dec_suffix = dec_name + dec_len + opname_len + 2;

  fname = GET_MODE_NAME (fmode);
  tname = GET_MODE_NAME (tmode);

  /* Pick whichever candidate buffer applies to this mode pair.  */
  if (DECIMAL_FLOAT_MODE_P (fmode) || DECIMAL_FLOAT_MODE_P (tmode))
    {
      libfunc_name = dec_name;
      suffix = dec_suffix;
    }
  else
    {
      libfunc_name = nondec_name;
      suffix = nondec_suffix;
    }

  /* Append the lowercased FROM-mode name then TO-mode name.  Unlike
     intra-class names, no operand-count character is appended.  */
  p = suffix;
  for (q = fname; *q; p++, q++)
    *p = TOLOWER (*q);
  for (q = tname; *q; p++, q++)
    *p = TOLOWER (*q);

  *p = '\0';

  set_conv_libfunc (tab, tmode, fmode,
		    ggc_alloc_string (libfunc_name, p - libfunc_name));
}
5839 
5840 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5841    int->fp conversion.  */
5842 
5843 void
5844 gen_int_to_fp_conv_libfunc (convert_optab tab,
5845 			    const char *opname,
5846 			    machine_mode tmode,
5847 			    machine_mode fmode)
5848 {
5849   if (GET_MODE_CLASS (fmode) != MODE_INT)
5850     return;
5851   if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5852     return;
5853   gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5854 }
5855 
5856 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5857    naming scheme.  */
5858 
5859 void
5860 gen_ufloat_conv_libfunc (convert_optab tab,
5861 			 const char *opname ATTRIBUTE_UNUSED,
5862 			 machine_mode tmode,
5863 			 machine_mode fmode)
5864 {
5865   if (DECIMAL_FLOAT_MODE_P (tmode))
5866     gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5867   else
5868     gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5869 }
5870 
/* Same as gen_interclass_conv_libfunc but verify that we are producing
   an int->fp conversion whose target is a binary (non-decimal) float
   mode.  */
5873 
5874 void
5875 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5876 			               const char *opname,
5877 			               machine_mode tmode,
5878 			               machine_mode fmode)
5879 {
5880   if (GET_MODE_CLASS (fmode) != MODE_INT)
5881     return;
5882   if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5883     return;
5884   gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5885 }
5886 
/* Same as gen_interclass_conv_libfunc but verify that we are producing
   an fp->int conversion; the source may be a binary or decimal float
   mode.  */
5889 
5890 void
5891 gen_fp_to_int_conv_libfunc (convert_optab tab,
5892 			    const char *opname,
5893 			    machine_mode tmode,
5894 			    machine_mode fmode)
5895 {
5896   if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5897     return;
5898   if (GET_MODE_CLASS (tmode) != MODE_INT)
5899     return;
5900   gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5901 }
5902 
/* Initialize the libfunc fields of an intra-mode-class conversion optab.
   The string formation rules are
   similar to the ones for init_libfuncs, above.  */
5906 
5907 void
5908 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5909 			     machine_mode tmode, machine_mode fmode)
5910 {
5911   size_t opname_len = strlen (opname);
5912   size_t mname_len = 0;
5913 
5914   const char *fname, *tname;
5915   const char *q;
5916   int prefix_len = targetm.libfunc_gnu_prefix ? 6 : 2;
5917   char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5918   char *libfunc_name, *suffix;
5919   char *p;
5920 
5921   /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5922      depends on which underlying decimal floating point format is used.  */
5923   const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5924 
5925   mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5926 
5927   nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5928   nondec_name[0] = '_';
5929   nondec_name[1] = '_';
5930   if (targetm.libfunc_gnu_prefix)
5931     {
5932       nondec_name[2] = 'g';
5933       nondec_name[3] = 'n';
5934       nondec_name[4] = 'u';
5935       nondec_name[5] = '_';
5936     }
5937   memcpy (&nondec_name[prefix_len], opname, opname_len);
5938   nondec_suffix = nondec_name + opname_len + prefix_len;
5939 
5940   dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5941   dec_name[0] = '_';
5942   dec_name[1] = '_';
5943   memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5944   memcpy (&dec_name[2 + dec_len], opname, opname_len);
5945   dec_suffix = dec_name + dec_len + opname_len + 2;
5946 
5947   fname = GET_MODE_NAME (fmode);
5948   tname = GET_MODE_NAME (tmode);
5949 
5950   if (DECIMAL_FLOAT_MODE_P (fmode) || DECIMAL_FLOAT_MODE_P (tmode))
5951     {
5952       libfunc_name = dec_name;
5953       suffix = dec_suffix;
5954     }
5955   else
5956     {
5957       libfunc_name = nondec_name;
5958       suffix = nondec_suffix;
5959     }
5960 
5961   p = suffix;
5962   for (q = fname; *q; p++, q++)
5963     *p = TOLOWER (*q);
5964   for (q = tname; *q; p++, q++)
5965     *p = TOLOWER (*q);
5966 
5967   *p++ = '2';
5968   *p = '\0';
5969 
5970   set_conv_libfunc (tab, tmode, fmode,
5971 		    ggc_alloc_string (libfunc_name, p - libfunc_name));
5972 }
5973 
/* Pick proper libcall for trunc_optab.  We need to choose whether we do
   truncation or extension and interclass or intraclass.  */
5976 
void
gen_trunc_conv_libfunc (convert_optab tab,
			 const char *opname,
			 machine_mode tmode,
			 machine_mode fmode)
{
  /* Both modes must be (binary or decimal) floating point.  */
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (tmode == fmode)
    return;

  /* A binary<->decimal conversion always gets an interclass libfunc,
     regardless of the relative precisions.  */
  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
     gen_interclass_conv_libfunc (tab, opname, tmode, fmode);

  /* Within a class, only a genuine truncation (strictly narrower
     target) belongs in the trunc table.  */
  if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
}
6002 
/* Pick proper libcall for extend_optab.  We need to choose whether we do
   truncation or extension and interclass or intraclass.  */
6005 
void
gen_extend_conv_libfunc (convert_optab tab,
			 const char *opname ATTRIBUTE_UNUSED,
			 machine_mode tmode,
			 machine_mode fmode)
{
  /* Both modes must be (binary or decimal) floating point.  */
  if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
    return;
  if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
    return;
  if (tmode == fmode)
    return;

  /* A binary<->decimal conversion always gets an interclass libfunc,
     regardless of the relative precisions.  */
  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
      || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
     gen_interclass_conv_libfunc (tab, opname, tmode, fmode);

  /* Within a class, only a genuine extension (source not wider than the
     target) belongs in the extend table.  */
  if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
    return;

  if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
       && GET_MODE_CLASS (fmode) == MODE_FLOAT)
      || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
    gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
}
6031 
/* Pick proper libcall for fract_optab.  We need to choose whether we do
   interclass or intraclass.  */
6034 
6035 void
6036 gen_fract_conv_libfunc (convert_optab tab,
6037 			const char *opname,
6038 			machine_mode tmode,
6039 			machine_mode fmode)
6040 {
6041   if (tmode == fmode)
6042     return;
6043   if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
6044     return;
6045 
6046   if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6047     gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6048   else
6049     gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6050 }
6051 
6052 /* Pick proper libcall for fractuns_optab.  */
6053 
6054 void
6055 gen_fractuns_conv_libfunc (convert_optab tab,
6056 			   const char *opname,
6057 			   machine_mode tmode,
6058 			   machine_mode fmode)
6059 {
6060   if (tmode == fmode)
6061     return;
6062   /* One mode must be a fixed-point mode, and the other must be an integer
6063      mode. */
6064   if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
6065 	|| (ALL_FIXED_POINT_MODE_P (fmode)
6066 	    && GET_MODE_CLASS (tmode) == MODE_INT)))
6067     return;
6068 
6069   gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6070 }
6071 
/* Pick proper libcall for satfract_optab.  We need to choose whether we do
   interclass or intraclass.  */
6074 
6075 void
6076 gen_satfract_conv_libfunc (convert_optab tab,
6077 			   const char *opname,
6078 			   machine_mode tmode,
6079 			   machine_mode fmode)
6080 {
6081   if (tmode == fmode)
6082     return;
6083   /* TMODE must be a fixed-point mode.  */
6084   if (!ALL_FIXED_POINT_MODE_P (tmode))
6085     return;
6086 
6087   if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6088     gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6089   else
6090     gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6091 }
6092 
6093 /* Pick proper libcall for satfractuns_optab.  */
6094 
6095 void
6096 gen_satfractuns_conv_libfunc (convert_optab tab,
6097 			      const char *opname,
6098 			      machine_mode tmode,
6099 			      machine_mode fmode)
6100 {
6101   if (tmode == fmode)
6102     return;
6103   /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6104   if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
6105     return;
6106 
6107   gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6108 }
6109 
6110 /* Hashtable callbacks for libfunc_decls.  */
6111 
struct libfunc_decl_hasher : ggc_hasher<tree>
{
  /* Hash a stored decl by the hash of its DECL_NAME identifier.  */
  static hashval_t
  hash (tree entry)
  {
    return IDENTIFIER_HASH_VALUE (DECL_NAME (entry));
  }

  /* Lookups use the identifier node itself as the key (see
     init_one_libfunc), so compare it against the decl's DECL_NAME.  */
  static bool
  equal (tree decl, tree name)
  {
    return DECL_NAME (decl) == name;
  }
};
6126 
6127 /* A table of previously-created libfuncs, hashed by name.  */
6128 static GTY (()) hash_table<libfunc_decl_hasher> *libfunc_decls;
6129 
6130 /* Build a decl for a libfunc named NAME. */
6131 
tree
build_libfunc_function (const char *name)
{
  tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
			  get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;
  /* Computing DECL_ASSEMBLER_NAME here also forces creation of the
     decl's SYMBOL_REF rtl below.  */
  gcc_assert (DECL_ASSEMBLER_NAME (decl));

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);

  return decl;
}
6151 
6152 rtx
6153 init_one_libfunc (const char *name)
6154 {
6155   tree id, decl;
6156   hashval_t hash;
6157 
6158   if (libfunc_decls == NULL)
6159     libfunc_decls = hash_table<libfunc_decl_hasher>::create_ggc (37);
6160 
6161   /* See if we have already created a libfunc decl for this function.  */
6162   id = get_identifier (name);
6163   hash = IDENTIFIER_HASH_VALUE (id);
6164   tree *slot = libfunc_decls->find_slot_with_hash (id, hash, INSERT);
6165   decl = *slot;
6166   if (decl == NULL)
6167     {
6168       /* Create a new decl, so that it can be passed to
6169 	 targetm.encode_section_info.  */
6170       decl = build_libfunc_function (name);
6171       *slot = decl;
6172     }
6173   return XEXP (DECL_RTL (decl), 0);
6174 }
6175 
6176 /* Adjust the assembler name of libfunc NAME to ASMSPEC.  */
6177 
6178 rtx
6179 set_user_assembler_libfunc (const char *name, const char *asmspec)
6180 {
6181   tree id, decl;
6182   hashval_t hash;
6183 
6184   id = get_identifier (name);
6185   hash = IDENTIFIER_HASH_VALUE (id);
6186   tree *slot = libfunc_decls->find_slot_with_hash (id, hash, NO_INSERT);
6187   gcc_assert (slot);
6188   decl = (tree) *slot;
6189   set_user_assembler_name (decl, asmspec);
6190   return XEXP (DECL_RTL (decl), 0);
6191 }
6192 
6193 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6194    MODE to NAME, which should be either 0 or a string constant.  */
6195 void
6196 set_optab_libfunc (optab op, machine_mode mode, const char *name)
6197 {
6198   rtx val;
6199   struct libfunc_entry e;
6200   struct libfunc_entry **slot;
6201 
6202   e.op = op;
6203   e.mode1 = mode;
6204   e.mode2 = VOIDmode;
6205 
6206   if (name)
6207     val = init_one_libfunc (name);
6208   else
6209     val = 0;
6210   slot = libfunc_hash->find_slot (&e, INSERT);
6211   if (*slot == NULL)
6212     *slot = ggc_alloc<libfunc_entry> ();
6213   (*slot)->op = op;
6214   (*slot)->mode1 = mode;
6215   (*slot)->mode2 = VOIDmode;
6216   (*slot)->libfunc = val;
6217 }
6218 
6219 /* Call this to reset the function entry for one conversion optab
6220    (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6221    either 0 or a string constant.  */
6222 void
6223 set_conv_libfunc (convert_optab optab, machine_mode tmode,
6224 		  machine_mode fmode, const char *name)
6225 {
6226   rtx val;
6227   struct libfunc_entry e;
6228   struct libfunc_entry **slot;
6229 
6230   e.op = optab;
6231   e.mode1 = tmode;
6232   e.mode2 = fmode;
6233 
6234   if (name)
6235     val = init_one_libfunc (name);
6236   else
6237     val = 0;
6238   slot = libfunc_hash->find_slot (&e, INSERT);
6239   if (*slot == NULL)
6240     *slot = ggc_alloc<libfunc_entry> ();
6241   (*slot)->op = optab;
6242   (*slot)->mode1 = tmode;
6243   (*slot)->mode2 = fmode;
6244   (*slot)->libfunc = val;
6245 }
6246 
6247 /* Call this to initialize the contents of the optabs
6248    appropriately for the current target machine.  */
6249 
void
init_optabs (void)
{
  /* Reset (or lazily create) the libfunc cache so stale entries from a
     previous target configuration are discarded.  */
  if (libfunc_hash)
    libfunc_hash->empty ();
  else
    libfunc_hash = hash_table<libfunc_hasher>::create_ggc (10);

  /* Fill in the optabs with the insns we support.  */
  init_all_optabs (this_fn_optabs);

  /* The ffs function operates on `int'.  Fall back on it if we do not
     have a libgcc2 function for that width.  */
  if (INT_TYPE_SIZE < BITS_PER_WORD)
    set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
		       "ffs");

  /* Explicitly initialize the bswap libfuncs since we need them to be
     valid for things other than word_mode.  */
  if (targetm.libfunc_gnu_prefix)
    {
      set_optab_libfunc (bswap_optab, SImode, "__gnu_bswapsi2");
      set_optab_libfunc (bswap_optab, DImode, "__gnu_bswapdi2");
    }
  else
    {
      set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
      set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
    }

  /* Use cabs for double complex abs, since systems generally have cabs.
     Don't define any libcall for float complex, so that cabs will be used.  */
  if (complex_double_type_node)
    set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node),
		       "cabs");

  /* Cache SYMBOL_REFs for the library routines the expanders call
     directly (not through an optab).  */
  abort_libfunc = init_one_libfunc ("abort");
  memcpy_libfunc = init_one_libfunc ("memcpy");
  memmove_libfunc = init_one_libfunc ("memmove");
  memcmp_libfunc = init_one_libfunc ("memcmp");
  memset_libfunc = init_one_libfunc ("memset");
  setbits_libfunc = init_one_libfunc ("__setbits");

#ifndef DONT_USE_BUILTIN_SETJMP
  setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
  longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
#else
  setjmp_libfunc = init_one_libfunc ("setjmp");
  longjmp_libfunc = init_one_libfunc ("longjmp");
#endif
  unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
  unwind_sjlj_unregister_libfunc
    = init_one_libfunc ("_Unwind_SjLj_Unregister");

  /* For function entry/exit instrumentation.  */
  profile_function_entry_libfunc
    = init_one_libfunc ("__cyg_profile_func_enter");
  profile_function_exit_libfunc
    = init_one_libfunc ("__cyg_profile_func_exit");

  gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");

  /* Allow the target to add more libcalls or rename some, etc.  */
  targetm.init_libfuncs ();
}
6315 
6316 /* Use the current target and options to initialize
6317    TREE_OPTIMIZATION_OPTABS (OPTNODE).  */
6318 
void
init_tree_optimization_optabs (tree optnode)
{
  /* Quick exit if we have already computed optabs for this target.  */
  if (TREE_OPTIMIZATION_BASE_OPTABS (optnode) == this_target_optabs)
    return;

  /* Forget any previous information and set up for the current target.  */
  TREE_OPTIMIZATION_BASE_OPTABS (optnode) = this_target_optabs;
  struct target_optabs *tmp_optabs = (struct target_optabs *)
    TREE_OPTIMIZATION_OPTABS (optnode);
  /* Reuse the node's existing scratch buffer if it has one, otherwise
     allocate a fresh GC-managed one.  */
  if (tmp_optabs)
    memset (tmp_optabs, 0, sizeof (struct target_optabs));
  else
    tmp_optabs = ggc_alloc<target_optabs> ();

  /* Generate a new set of optabs into tmp_optabs.  */
  init_all_optabs (tmp_optabs);

  /* If the optabs changed, record it.  */
  if (memcmp (tmp_optabs, this_target_optabs, sizeof (struct target_optabs)))
    TREE_OPTIMIZATION_OPTABS (optnode) = tmp_optabs;
  else
    {
      /* Identical to the target's defaults: record nothing and release
	 the scratch copy.  */
      TREE_OPTIMIZATION_OPTABS (optnode) = NULL;
      ggc_free (tmp_optabs);
    }
}
6347 
6348 /* A helper function for init_sync_libfuncs.  Using the basename BASE,
6349    install libfuncs into TAB for BASE_N for 1 <= N <= MAX.  */
6350 
6351 static void
6352 init_sync_libfuncs_1 (optab tab, const char *base, int max)
6353 {
6354   machine_mode mode;
6355   char buf[64];
6356   size_t len = strlen (base);
6357   int i;
6358 
6359   gcc_assert (max <= 8);
6360   gcc_assert (len + 3 < sizeof (buf));
6361 
6362   memcpy (buf, base, len);
6363   buf[len] = '_';
6364   buf[len + 1] = '0';
6365   buf[len + 2] = '\0';
6366 
6367   mode = QImode;
6368   for (i = 1; i <= max; i *= 2)
6369     {
6370       buf[len + 1] = '0' + i;
6371       set_optab_libfunc (tab, mode, buf);
6372       mode = GET_MODE_2XWIDER_MODE (mode);
6373     }
6374 }
6375 
6376 void
6377 init_sync_libfuncs (int max)
6378 {
6379   if (!flag_sync_libcalls)
6380     return;
6381 
6382   init_sync_libfuncs_1 (sync_compare_and_swap_optab,
6383 			"__sync_val_compare_and_swap", max);
6384   init_sync_libfuncs_1 (sync_lock_test_and_set_optab,
6385 			"__sync_lock_test_and_set", max);
6386 
6387   init_sync_libfuncs_1 (sync_old_add_optab, "__sync_fetch_and_add", max);
6388   init_sync_libfuncs_1 (sync_old_sub_optab, "__sync_fetch_and_sub", max);
6389   init_sync_libfuncs_1 (sync_old_ior_optab, "__sync_fetch_and_or", max);
6390   init_sync_libfuncs_1 (sync_old_and_optab, "__sync_fetch_and_and", max);
6391   init_sync_libfuncs_1 (sync_old_xor_optab, "__sync_fetch_and_xor", max);
6392   init_sync_libfuncs_1 (sync_old_nand_optab, "__sync_fetch_and_nand", max);
6393 
6394   init_sync_libfuncs_1 (sync_new_add_optab, "__sync_add_and_fetch", max);
6395   init_sync_libfuncs_1 (sync_new_sub_optab, "__sync_sub_and_fetch", max);
6396   init_sync_libfuncs_1 (sync_new_ior_optab, "__sync_or_and_fetch", max);
6397   init_sync_libfuncs_1 (sync_new_and_optab, "__sync_and_and_fetch", max);
6398   init_sync_libfuncs_1 (sync_new_xor_optab, "__sync_xor_and_fetch", max);
6399   init_sync_libfuncs_1 (sync_new_nand_optab, "__sync_nand_and_fetch", max);
6400 }
6401 
6402 /* Print information about the current contents of the optabs on
6403    STDERR.  */
6404 
6405 DEBUG_FUNCTION void
6406 debug_optab_libfuncs (void)
6407 {
6408   int i, j, k;
6409 
6410   /* Dump the arithmetic optabs.  */
6411   for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i)
6412     for (j = 0; j < NUM_MACHINE_MODES; ++j)
6413       {
6414 	rtx l = optab_libfunc ((optab) i, (machine_mode) j);
6415 	if (l)
6416 	  {
6417 	    gcc_assert (GET_CODE (l) == SYMBOL_REF);
6418 	    fprintf (stderr, "%s\t%s:\t%s\n",
6419 		     GET_RTX_NAME (optab_to_code ((optab) i)),
6420 		     GET_MODE_NAME (j),
6421 		     XSTR (l, 0));
6422 	  }
6423       }
6424 
6425   /* Dump the conversion optabs.  */
6426   for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i)
6427     for (j = 0; j < NUM_MACHINE_MODES; ++j)
6428       for (k = 0; k < NUM_MACHINE_MODES; ++k)
6429 	{
6430 	  rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j,
6431 					 (machine_mode) k);
6432 	  if (l)
6433 	    {
6434 	      gcc_assert (GET_CODE (l) == SYMBOL_REF);
6435 	      fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6436 		       GET_RTX_NAME (optab_to_code ((optab) i)),
6437 		       GET_MODE_NAME (j),
6438 		       GET_MODE_NAME (k),
6439 		       XSTR (l, 0));
6440 	    }
6441 	}
6442 }
6443 
6444 
6445 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6446    CODE.  Return 0 on failure.  */
6447 
rtx
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
  machine_mode mode = GET_MODE (op1);
  enum insn_code icode;
  rtx insn;
  rtx trap_rtx;

  /* With no mode on OP1 we cannot look up a ctrap pattern.  */
  if (mode == VOIDmode)
    return 0;

  icode = optab_handler (ctrap_optab, mode);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Some targets only accept a zero trap code.  */
  if (!insn_operand_matches (icode, 3, tcode))
    return 0;

  do_pending_stack_adjust ();
  start_sequence ();
  /* Build the comparison; TRAP_RTX is left null on failure.  */
  prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
		    &trap_rtx, &mode);
  if (!trap_rtx)
    insn = NULL_RTX;
  else
    insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
			    tcode);

  /* If that failed, then give up.  */
  if (insn == 0)
    {
      end_sequence ();
      return 0;
    }

  emit_insn (insn);
  /* Return the whole emitted sequence, not just the trap insn itself.  */
  insn = get_insns ();
  end_sequence ();
  return insn;
}
6489 
6490 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6491    or unsigned operation code.  */
6492 
6493 enum rtx_code
6494 get_rtx_code (enum tree_code tcode, bool unsignedp)
6495 {
6496   enum rtx_code code;
6497   switch (tcode)
6498     {
6499     case EQ_EXPR:
6500       code = EQ;
6501       break;
6502     case NE_EXPR:
6503       code = NE;
6504       break;
6505     case LT_EXPR:
6506       code = unsignedp ? LTU : LT;
6507       break;
6508     case LE_EXPR:
6509       code = unsignedp ? LEU : LE;
6510       break;
6511     case GT_EXPR:
6512       code = unsignedp ? GTU : GT;
6513       break;
6514     case GE_EXPR:
6515       code = unsignedp ? GEU : GE;
6516       break;
6517 
6518     case UNORDERED_EXPR:
6519       code = UNORDERED;
6520       break;
6521     case ORDERED_EXPR:
6522       code = ORDERED;
6523       break;
6524     case UNLT_EXPR:
6525       code = UNLT;
6526       break;
6527     case UNLE_EXPR:
6528       code = UNLE;
6529       break;
6530     case UNGT_EXPR:
6531       code = UNGT;
6532       break;
6533     case UNGE_EXPR:
6534       code = UNGE;
6535       break;
6536     case UNEQ_EXPR:
6537       code = UNEQ;
6538       break;
6539     case LTGT_EXPR:
6540       code = LTGT;
6541       break;
6542 
6543     case BIT_AND_EXPR:
6544       code = AND;
6545       break;
6546 
6547     case BIT_IOR_EXPR:
6548       code = IOR;
6549       break;
6550 
6551     default:
6552       gcc_unreachable ();
6553     }
6554   return code;
6555 }
6556 
6557 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6558    unsigned operators. Do not generate compare instruction.  */
6559 
static rtx
vector_compare_rtx (enum tree_code tcode, tree t_op0, tree t_op1,
		    bool unsignedp, enum insn_code icode)
{
  struct expand_operand ops[2];
  rtx rtx_op0, rtx_op1;
  enum rtx_code rcode = get_rtx_code (tcode, unsignedp);

  gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison);

  /* Expand operands.  */
  rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
			 EXPAND_STACK_PARM);
  rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
			 EXPAND_STACK_PARM);

  /* Legitimize them against operands 4 and 5 of the vcond pattern ICODE,
     then build the bare comparison rtx from the results.  */
  create_input_operand (&ops[0], rtx_op0, GET_MODE (rtx_op0));
  create_input_operand (&ops[1], rtx_op1, GET_MODE (rtx_op1));
  if (!maybe_legitimize_operands (icode, 4, 2, ops))
    gcc_unreachable ();
  return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
}
6582 
6583 /* Return true if VEC_PERM_EXPR of arbitrary input vectors can be expanded using
6584    SIMD extensions of the CPU.  SEL may be NULL, which stands for an unknown
6585    constant.  Note that additional permutations representing whole-vector shifts
6586    may also be handled via the vec_shr optab, but only where the second input
6587    vector is entirely constant zeroes; this case is not dealt with here.  */
6588 
bool
can_vec_perm_p (machine_mode mode, bool variable,
		const unsigned char *sel)
{
  machine_mode qimode;

  /* If the target doesn't implement a vector mode for the vector type,
     then no operations are supported.  */
  if (!VECTOR_MODE_P (mode))
    return false;

  /* A constant permutation can use the dedicated vec_perm_const pattern,
     subject to the target's own validity hook when a mask is given.  */
  if (!variable)
    {
      if (direct_optab_handler (vec_perm_const_optab, mode) != CODE_FOR_nothing
	  && (sel == NULL
	      || targetm.vectorize.vec_perm_const_ok == NULL
	      || targetm.vectorize.vec_perm_const_ok (mode, sel)))
	return true;
    }

  /* Otherwise, the general variable-mask pattern works for either case.  */
  if (direct_optab_handler (vec_perm_optab, mode) != CODE_FOR_nothing)
    return true;

  /* We allow fallback to a QI vector mode, and adjust the mask.  */
  if (GET_MODE_INNER (mode) == QImode)
    return false;
  qimode = mode_for_vector (QImode, GET_MODE_SIZE (mode));
  if (!VECTOR_MODE_P (qimode))
    return false;

  /* ??? For completeness, we ought to check the QImode version of
      vec_perm_const_optab.  But all users of this implicit lowering
      feature implement the variable vec_perm_optab.  */
  if (direct_optab_handler (vec_perm_optab, qimode) == CODE_FOR_nothing)
    return false;

  /* In order to support the lowering of variable permutations,
     we need to support shifts and adds.  */
  if (variable)
    {
      if (GET_MODE_UNIT_SIZE (mode) > 2
	  && optab_handler (ashl_optab, mode) == CODE_FOR_nothing
	  && optab_handler (vashl_optab, mode) == CODE_FOR_nothing)
	return false;
      if (optab_handler (add_optab, qimode) == CODE_FOR_nothing)
	return false;
    }

  return true;
}
6639 
6640 /* Checks if vec_perm mask SEL is a constant equivalent to a shift of the first
6641    vec_perm operand, assuming the second operand is a constant vector of zeroes.
6642    Return the shift distance in bits if so, or NULL_RTX if the vec_perm is not a
6643    shift.  */
6644 static rtx
6645 shift_amt_for_vec_perm_mask (rtx sel)
6646 {
6647   unsigned int i, first, nelt = GET_MODE_NUNITS (GET_MODE (sel));
6648   unsigned int bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (sel)));
6649 
6650   if (GET_CODE (sel) != CONST_VECTOR)
6651     return NULL_RTX;
6652 
6653   first = INTVAL (CONST_VECTOR_ELT (sel, 0));
6654   if (first >= nelt)
6655     return NULL_RTX;
6656   for (i = 1; i < nelt; i++)
6657     {
6658       int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
6659       unsigned int expected = i + first;
6660       /* Indices into the second vector are all equivalent.  */
6661       if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
6662 	return NULL_RTX;
6663     }
6664 
6665   return GEN_INT (first * bitsize);
6666 }
6667 
6668 /* A subroutine of expand_vec_perm for expanding one vec_perm insn.  */
6669 
static rtx
expand_vec_perm_1 (enum insn_code icode, rtx target,
		   rtx v0, rtx v1, rtx sel)
{
  machine_mode tmode = GET_MODE (target);
  machine_mode smode = GET_MODE (sel);
  struct expand_operand ops[4];

  create_output_operand (&ops[0], target, tmode);
  create_input_operand (&ops[3], sel, smode);

  /* Make an effort to preserve v0 == v1.  The target expander is able to
     rely on this to determine if we're permuting a single input operand.  */
  if (rtx_equal_p (v0, v1))
    {
      if (!insn_operand_matches (icode, 1, v0))
        v0 = force_reg (tmode, v0);
      gcc_checking_assert (insn_operand_matches (icode, 1, v0));
      gcc_checking_assert (insn_operand_matches (icode, 2, v0));

      /* Fixed operands keep the shared rtx from being legitimized into
	 two distinct registers.  */
      create_fixed_operand (&ops[1], v0);
      create_fixed_operand (&ops[2], v0);
    }
  else
    {
      create_input_operand (&ops[1], v0, tmode);
      create_input_operand (&ops[2], v1, tmode);
    }

  if (maybe_expand_insn (icode, 4, ops))
    return ops[0].value;
  /* NULL_RTX tells the caller to try another expansion strategy.  */
  return NULL_RTX;
}
6703 
6704 /* Generate instructions for vec_perm optab given its mode
6705    and three operands.  */
6706 
rtx
expand_vec_perm (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target)
{
  enum insn_code icode;
  machine_mode qimode;
  unsigned int i, w, e, u;
  rtx tmp, sel_qi = NULL;
  rtvec vec;

  if (!target || GET_MODE (target) != mode)
    target = gen_reg_rtx (mode);

  /* W = vector size in bytes, E = element count, U = bytes per element.  */
  w = GET_MODE_SIZE (mode);
  e = GET_MODE_NUNITS (mode);
  u = GET_MODE_UNIT_SIZE (mode);

  /* Set QIMODE to a different vector mode with byte elements.
     If no such mode, or if MODE already has byte elements, use VOIDmode.  */
  qimode = VOIDmode;
  if (GET_MODE_INNER (mode) != QImode)
    {
      qimode = mode_for_vector (QImode, w);
      if (!VECTOR_MODE_P (qimode))
	qimode = VOIDmode;
    }

  /* If the input is a constant, expand it specially.  */
  gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
  if (GET_CODE (sel) == CONST_VECTOR)
    {
      /* See if this can be handled with a vec_shr.  We only do this if the
	 second vector is all zeroes.  */
      enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
      enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
				      ? optab_handler (vec_shr_optab, qimode)
				      : CODE_FOR_nothing);
      rtx shift_amt = NULL_RTX;
      if (v1 == CONST0_RTX (GET_MODE (v1))
	  && (shift_code != CODE_FOR_nothing
	      || shift_code_qi != CODE_FOR_nothing))
	{
	  shift_amt = shift_amt_for_vec_perm_mask (sel);
	  if (shift_amt)
	    {
	      struct expand_operand ops[3];
	      /* First try a shift in the original element mode...  */
	      if (shift_code != CODE_FOR_nothing)
		{
		  create_output_operand (&ops[0], target, mode);
		  create_input_operand (&ops[1], v0, mode);
		  create_convert_operand_from_type (&ops[2], shift_amt,
						    sizetype);
		  if (maybe_expand_insn (shift_code, 3, ops))
		    return ops[0].value;
		}
	      /* ...then in the byte-vector mode (the amount is in bits
		 either way).  */
	      if (shift_code_qi != CODE_FOR_nothing)
		{
		  tmp = gen_reg_rtx (qimode);
		  create_output_operand (&ops[0], tmp, qimode);
		  create_input_operand (&ops[1], gen_lowpart (qimode, v0),
					qimode);
		  create_convert_operand_from_type (&ops[2], shift_amt,
						    sizetype);
		  if (maybe_expand_insn (shift_code_qi, 3, ops))
		    return gen_lowpart (mode, ops[0].value);
		}
	    }
	}

      icode = direct_optab_handler (vec_perm_const_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
	  if (tmp)
	    return tmp;
	}

      /* Fall back to a constant byte-based permutation.  */
      if (qimode != VOIDmode)
	{
	  /* Expand each element index into U consecutive byte indices.  */
	  vec = rtvec_alloc (w);
	  for (i = 0; i < e; ++i)
	    {
	      unsigned int j, this_e;

	      this_e = INTVAL (CONST_VECTOR_ELT (sel, i));
	      this_e &= 2 * e - 1;
	      this_e *= u;

	      for (j = 0; j < u; ++j)
		RTVEC_ELT (vec, i * u + j) = GEN_INT (this_e + j);
	    }
	  sel_qi = gen_rtx_CONST_VECTOR (qimode, vec);

	  icode = direct_optab_handler (vec_perm_const_optab, qimode);
	  if (icode != CODE_FOR_nothing)
	    {
	      tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
	      tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
				       gen_lowpart (qimode, v1), sel_qi);
	      if (tmp)
		return gen_lowpart (mode, tmp);
	    }
	}
    }

  /* Otherwise expand as a fully variable permutation.  */
  icode = direct_optab_handler (vec_perm_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      tmp = expand_vec_perm_1 (icode, target, v0, v1, sel);
      if (tmp)
	return tmp;
    }

  /* As a special case to aid several targets, lower the element-based
     permutation to a byte-based permutation and try again.  */
  if (qimode == VOIDmode)
    return NULL_RTX;
  icode = direct_optab_handler (vec_perm_optab, qimode);
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  /* SEL_QI may already have been built by the constant path above.  */
  if (sel_qi == NULL)
    {
      /* Multiply each element by its byte size.  */
      machine_mode selmode = GET_MODE (sel);
      if (u == 2)
	sel = expand_simple_binop (selmode, PLUS, sel, sel,
				   NULL, 0, OPTAB_DIRECT);
      else
	sel = expand_simple_binop (selmode, ASHIFT, sel,
				   GEN_INT (exact_log2 (u)),
				   NULL, 0, OPTAB_DIRECT);
      gcc_assert (sel != NULL);

      /* Broadcast the low byte each element into each of its bytes.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	{
	  int this_e = i / u * u;
	  if (BYTES_BIG_ENDIAN)
	    this_e += u - 1;
	  RTVEC_ELT (vec, i) = GEN_INT (this_e);
	}
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel = gen_lowpart (qimode, sel);
      sel = expand_vec_perm (qimode, sel, sel, tmp, NULL);
      gcc_assert (sel != NULL);

      /* Add the byte offset to each byte element.  */
      /* Note that the definition of the indices here is memory ordering,
	 so there should be no difference between big and little endian.  */
      vec = rtvec_alloc (w);
      for (i = 0; i < w; ++i)
	RTVEC_ELT (vec, i) = GEN_INT (i % u);
      tmp = gen_rtx_CONST_VECTOR (qimode, vec);
      sel_qi = expand_simple_binop (qimode, PLUS, sel, tmp,
				    sel, 0, OPTAB_DIRECT);
      gcc_assert (sel_qi != NULL);
    }

  tmp = mode != qimode ? gen_reg_rtx (qimode) : target;
  tmp = expand_vec_perm_1 (icode, tmp, gen_lowpart (qimode, v0),
			   gen_lowpart (qimode, v1), sel_qi);
  if (tmp)
    tmp = gen_lowpart (mode, tmp);
  return tmp;
}
6875 
6876 /* Return insn code for a conditional operator with a comparison in
6877    mode CMODE, unsigned if UNS is true, resulting in a value of mode VMODE.  */
6878 
6879 static inline enum insn_code
6880 get_vcond_icode (machine_mode vmode, machine_mode cmode, bool uns)
6881 {
6882   enum insn_code icode = CODE_FOR_nothing;
6883   if (uns)
6884     icode = convert_optab_handler (vcondu_optab, vmode, cmode);
6885   else
6886     icode = convert_optab_handler (vcond_optab, vmode, cmode);
6887   return icode;
6888 }
6889 
6890 /* Return TRUE iff, appropriate vector insns are available
6891    for vector cond expr with vector type VALUE_TYPE and a comparison
6892    with operand vector types in CMP_OP_TYPE.  */
6893 
6894 bool
6895 expand_vec_cond_expr_p (tree value_type, tree cmp_op_type)
6896 {
6897   machine_mode value_mode = TYPE_MODE (value_type);
6898   machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
6899   if (GET_MODE_SIZE (value_mode) != GET_MODE_SIZE (cmp_op_mode)
6900       || GET_MODE_NUNITS (value_mode) != GET_MODE_NUNITS (cmp_op_mode)
6901       || get_vcond_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type),
6902 			  TYPE_UNSIGNED (cmp_op_type)) == CODE_FOR_nothing)
6903     return false;
6904   return true;
6905 }
6906 
6907 /* Generate insns for a VEC_COND_EXPR, given its TYPE and its
6908    three operands.  */
6909 
rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
		      rtx target)
{
  struct expand_operand ops[6];
  enum insn_code icode;
  rtx comparison, rtx_op1, rtx_op2;
  machine_mode mode = TYPE_MODE (vec_cond_type);
  machine_mode cmp_op_mode;
  bool unsignedp;
  tree op0a, op0b;
  enum tree_code tcode;

  /* Split OP0 into a comparison code and its two operands.  */
  if (COMPARISON_CLASS_P (op0))
    {
      op0a = TREE_OPERAND (op0, 0);
      op0b = TREE_OPERAND (op0, 1);
      tcode = TREE_CODE (op0);
    }
  else
    {
      /* Fake op0 < 0.  */
      gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (op0)));
      op0a = op0;
      op0b = build_zero_cst (TREE_TYPE (op0));
      tcode = LT_EXPR;
    }
  unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
  cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));


  gcc_assert (GET_MODE_SIZE (mode) == GET_MODE_SIZE (cmp_op_mode)
	      && GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (cmp_op_mode));

  icode = get_vcond_icode (mode, cmp_op_mode, unsignedp);
  if (icode == CODE_FOR_nothing)
    return 0;

  /* Operand 3 is the whole comparison, operands 4/5 its two sides;
     operands 1/2 are the values selected between.  */
  comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode);
  rtx_op1 = expand_normal (op1);
  rtx_op2 = expand_normal (op2);

  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], rtx_op1, mode);
  create_input_operand (&ops[2], rtx_op2, mode);
  create_fixed_operand (&ops[3], comparison);
  create_fixed_operand (&ops[4], XEXP (comparison, 0));
  create_fixed_operand (&ops[5], XEXP (comparison, 1));
  expand_insn (icode, 6, ops);
  return ops[0].value;
}
6961 
/* Return non-zero if a highpart multiply is supported or can be synthesized.
   For the benefit of expand_mult_highpart, the return value is 1 for direct,
   2 for even/odd widening, and 3 for hi/lo widening.  */
6965 
int
can_mult_highpart_p (machine_mode mode, bool uns_p)
{
  optab op;
  unsigned char *sel;
  unsigned i, nunits;

  /* Method 1: a direct highpart-multiply pattern.  */
  op = uns_p ? umul_highpart_optab : smul_highpart_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    return 1;

  /* If the mode is an integral vector, synth from widening operations.  */
  if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
    return 0;

  nunits = GET_MODE_NUNITS (mode);
  sel = XALLOCAVEC (unsigned char, nunits);

  /* Method 2: even/odd widening multiplies, plus a constant permutation
     selecting the high halves of the products.  */
  op = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    {
      op = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      if (optab_handler (op, mode) != CODE_FOR_nothing)
	{
	  for (i = 0; i < nunits; ++i)
	    sel[i] = !BYTES_BIG_ENDIAN + (i & ~1) + ((i & 1) ? nunits : 0);
	  if (can_vec_perm_p (mode, false, sel))
	    return 2;
	}
    }

  /* Method 3: hi/lo widening multiplies, plus a high-half permutation.
     The masks built here must match those in expand_mult_highpart.  */
  op = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
  if (optab_handler (op, mode) != CODE_FOR_nothing)
    {
      op = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      if (optab_handler (op, mode) != CODE_FOR_nothing)
	{
	  for (i = 0; i < nunits; ++i)
	    sel[i] = 2 * i + (BYTES_BIG_ENDIAN ? 0 : 1);
	  if (can_vec_perm_p (mode, false, sel))
	    return 3;
	}
    }

  return 0;
}
7012 
7013 /* Expand a highpart multiply.  */
7014 
rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
		      rtx target, bool uns_p)
{
  struct expand_operand eops[3];
  enum insn_code icode;
  int method, i, nunits;
  machine_mode wmode;
  rtx m1, m2, perm;
  optab tab1, tab2;
  rtvec v;

  /* METHOD mirrors the return protocol of can_mult_highpart_p:
     0 unsupported, 1 direct, 2 even/odd widening, 3 hi/lo widening.  */
  method = can_mult_highpart_p (mode, uns_p);
  switch (method)
    {
    case 0:
      return NULL_RTX;
    case 1:
      tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab;
      return expand_binop (mode, tab1, op0, op1, target, uns_p,
			   OPTAB_LIB_WIDEN);
    case 2:
      tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab;
      tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab;
      break;
    case 3:
      tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
      tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
      /* Big-endian targets consume the hi/lo pair in the opposite
	 order, matching the mask built below.  */
      if (BYTES_BIG_ENDIAN)
	{
	  optab t = tab1;
	  tab1 = tab2;
	  tab2 = t;
	}
      break;
    default:
      gcc_unreachable ();
    }

  icode = optab_handler (tab1, mode);
  nunits = GET_MODE_NUNITS (mode);
  wmode = insn_data[icode].operand[0].mode;
  gcc_checking_assert (2 * GET_MODE_NUNITS (wmode) == nunits);
  gcc_checking_assert (GET_MODE_SIZE (wmode) == GET_MODE_SIZE (mode));

  /* Compute the two widened half-products, then view each as MODE.  */
  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (icode, 3, eops);
  m1 = gen_lowpart (mode, eops[0].value);

  create_output_operand (&eops[0], gen_reg_rtx (wmode), wmode);
  create_input_operand (&eops[1], op0, mode);
  create_input_operand (&eops[2], op1, mode);
  expand_insn (optab_handler (tab2, mode), 3, eops);
  m2 = gen_lowpart (mode, eops[0].value);

  /* Build the permutation selecting the high parts out of M1/M2; the
     masks match those tested by can_mult_highpart_p.  */
  v = rtvec_alloc (nunits);
  if (method == 2)
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (!BYTES_BIG_ENDIAN + (i & ~1)
				    + ((i & 1) ? nunits : 0));
    }
  else
    {
      for (i = 0; i < nunits; ++i)
	RTVEC_ELT (v, i) = GEN_INT (2 * i + (BYTES_BIG_ENDIAN ? 0 : 1));
    }
  perm = gen_rtx_CONST_VECTOR (mode, v);

  return expand_vec_perm (mode, m1, m2, perm, target);
}
7088 
7089 /* Return true if target supports vector masked load/store for mode.  */
bool
can_vec_mask_load_store_p (machine_mode mode, bool is_load)
{
  optab op = is_load ? maskload_optab : maskstore_optab;
  machine_mode vmode;
  unsigned int vector_sizes;

  /* If mode is vector mode, check it directly.  */
  if (VECTOR_MODE_P (mode))
    return optab_handler (op, mode) != CODE_FOR_nothing;

  /* Otherwise, return true if there is some vector mode with
     the mask load/store supported.  */

  /* See if there is any chance the mask load or store might be
     vectorized.  If not, punt.  */
  vmode = targetm.vectorize.preferred_simd_mode (mode);
  if (!VECTOR_MODE_P (vmode))
    return false;

  if (optab_handler (op, vmode) != CODE_FOR_nothing)
    return true;

  /* VECTOR_SIZES is a bitmask of the vector byte-sizes the target can
     autovectorize with; try each size in turn, largest first.  */
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
  while (vector_sizes != 0)
    {
      /* Extract and clear the highest set bit.  */
      unsigned int cur = 1 << floor_log2 (vector_sizes);
      vector_sizes &= ~cur;
      /* Only vectors with more than one MODE element are interesting.  */
      if (cur <= GET_MODE_SIZE (mode))
	continue;
      vmode = mode_for_vector (mode, cur / GET_MODE_SIZE (mode));
      if (VECTOR_MODE_P (vmode)
	  && optab_handler (op, vmode) != CODE_FOR_nothing)
	return true;
    }
  return false;
}
7127 
7128 /* Return true if there is a compare_and_swap pattern.  */
7129 
7130 bool
7131 can_compare_and_swap_p (machine_mode mode, bool allow_libcall)
7132 {
7133   enum insn_code icode;
7134 
7135   /* Check for __atomic_compare_and_swap.  */
7136   icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
7137   if (icode != CODE_FOR_nothing)
7138     return true;
7139 
7140   /* Check for __sync_compare_and_swap.  */
7141   icode = optab_handler (sync_compare_and_swap_optab, mode);
7142   if (icode != CODE_FOR_nothing)
7143     return true;
7144   if (allow_libcall && optab_libfunc (sync_compare_and_swap_optab, mode))
7145     return true;
7146 
7147   /* No inline compare and swap.  */
7148   return false;
7149 }
7150 
7151 /* Return true if an atomic exchange can be performed.  */
7152 
7153 bool
7154 can_atomic_exchange_p (machine_mode mode, bool allow_libcall)
7155 {
7156   enum insn_code icode;
7157 
7158   /* Check for __atomic_exchange.  */
7159   icode = direct_optab_handler (atomic_exchange_optab, mode);
7160   if (icode != CODE_FOR_nothing)
7161     return true;
7162 
7163   /* Don't check __sync_test_and_set, as on some platforms that
7164      has reduced functionality.  Targets that really do support
7165      a proper exchange should simply be updated to the __atomics.  */
7166 
7167   return can_compare_and_swap_p (mode, allow_libcall);
7168 }
7169 
7170 
7171 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
7172    pattern.  */
7173 
7174 static void
7175 find_cc_set (rtx x, const_rtx pat, void *data)
7176 {
7177   if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
7178       && GET_CODE (pat) == SET)
7179     {
7180       rtx *p_cc_reg = (rtx *) data;
7181       gcc_assert (!*p_cc_reg);
7182       *p_cc_reg = x;
7183     }
7184 }
7185 
7186 /* This is a helper function for the other atomic operations.  This function
7187    emits a loop that contains SEQ that iterates until a compare-and-swap
7188    operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
7189    a set of instructions that takes a value from OLD_REG as an input and
7190    produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
7191    set to the current contents of MEM.  After SEQ, a compare-and-swap will
7192    attempt to update MEM with NEW_REG.  The function returns true when the
7193    loop was generated successfully.  */
7194 
static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  machine_mode mode = GET_MODE (mem);
  rtx_code_label *label;
  rtx cmp_reg, success, oldval;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
        old_reg = cmp_reg;
	seq;
	(success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
	if (!success)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* The failure memory order can be relaxed: on failure we simply loop
     back and retry.  */
  success = NULL_RTX;
  oldval = cmp_reg;
  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
				       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
				       MEMMODEL_RELAXED))
    return false;

  /* Feed the observed value back into the next iteration's compare.  */
  if (oldval != cmp_reg)
    emit_move_insn (cmp_reg, oldval);

  /* Mark this jump predicted not taken.  */
  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
			   GET_MODE (success), 1, label, 0);
  return true;
}
7239 
7240 
/* This function tries to emit an atomic_exchange instruction.  VAL is written
   to *MEM using memory model MODEL.  The previous contents of *MEM are
   returned, using TARGET if possible.  */
7244 
7245 static rtx
7246 maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7247 {
7248   machine_mode mode = GET_MODE (mem);
7249   enum insn_code icode;
7250 
7251   /* If the target supports the exchange directly, great.  */
7252   icode = direct_optab_handler (atomic_exchange_optab, mode);
7253   if (icode != CODE_FOR_nothing)
7254     {
7255       struct expand_operand ops[4];
7256 
7257       create_output_operand (&ops[0], target, mode);
7258       create_fixed_operand (&ops[1], mem);
7259       create_input_operand (&ops[2], val, mode);
7260       create_integer_operand (&ops[3], model);
7261       if (maybe_expand_insn (icode, 4, ops))
7262 	return ops[0].value;
7263     }
7264 
7265   return NULL_RTX;
7266 }
7267 
/* This function tries to implement an atomic exchange operation using
   __sync_lock_test_and_set.  VAL is written to *MEM using memory model MODEL.
   The previous contents of *MEM are returned, using TARGET if possible.
   Since this instruction is an acquire barrier only, stronger memory
   models may require additional barriers to be emitted.  */
7273 
static rtx
maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
				   enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  /* Remember where we are so any barrier emitted below can be removed
     if no test-and-set form is ultimately available.  */
  rtx_insn *last_insn = get_last_insn ();

  icode = optab_handler (sync_lock_test_and_set_optab, mode);

  /* Legacy sync_lock_test_and_set is an acquire barrier.  If the pattern
     exists, and the memory model is stronger than acquire, add a release
     barrier before the instruction.  */

  if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model))
    expand_mem_thread_fence (model);

  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], val, mode);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If an external test-and-set libcall is provided, use that instead of
     any external compare-and-swap that we might get from the compare-and-
     swap-loop expansion later.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
      if (libfunc != NULL)
	{
	  rtx addr;

	  addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  return emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					  mode, 2, addr, ptr_mode,
					  val, mode);
	}
    }

  /* If the test_and_set can't be emitted, eliminate any barrier that might
     have been emitted.  */
  delete_insns_since (last_insn);
  return NULL_RTX;
}
7323 
7324 /* This function tries to implement an atomic exchange operation using a
7325    compare_and_swap loop. VAL is written to *MEM.  The previous contents of
7326    *MEM are returned, using TARGET if possible.  No memory model is required
7327    since a compare_and_swap loop is seq-cst.  */
7328 
7329 static rtx
7330 maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
7331 {
7332   machine_mode mode = GET_MODE (mem);
7333 
7334   if (can_compare_and_swap_p (mode, true))
7335     {
7336       if (!target || !register_operand (target, mode))
7337 	target = gen_reg_rtx (mode);
7338       if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7339 	return target;
7340     }
7341 
7342   return NULL_RTX;
7343 }
7344 
/* This function tries to implement an atomic test-and-set operation
   using the atomic_test_and_set instruction pattern.  A boolean value
   is returned from the operation, using TARGET if possible.  */

#ifndef HAVE_atomic_test_and_set
#define HAVE_atomic_test_and_set 0
#define CODE_FOR_atomic_test_and_set CODE_FOR_nothing
#endif

static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode pat_bool_mode;
  struct expand_operand ops[3];

  /* Nothing to do if the target provides no atomic_test_and_set pattern.  */
  if (!HAVE_atomic_test_and_set)
    return NULL_RTX;

  /* While we always get QImode from __atomic_test_and_set, we get
     other memory modes from __sync_lock_test_and_set.  Note that we
     use no endian adjustment here.  This matches the 4.6 behavior
     in the Sparc backend.  */
  gcc_checking_assert
    (insn_data[CODE_FOR_atomic_test_and_set].operand[1].mode == QImode);
  if (GET_MODE (mem) != QImode)
    mem = adjust_address_nv (mem, QImode, 0);

  /* The pattern defines the mode of its own boolean result operand.  */
  pat_bool_mode = insn_data[CODE_FOR_atomic_test_and_set].operand[0].mode;
  create_output_operand (&ops[0], target, pat_bool_mode);
  create_fixed_operand (&ops[1], mem);
  create_integer_operand (&ops[2], model);

  if (maybe_expand_insn (CODE_FOR_atomic_test_and_set, 3, ops))
    return ops[0].value;
  return NULL_RTX;
}
7381 
7382 /* This function expands the legacy _sync_lock test_and_set operation which is
7383    generally an atomic exchange.  Some limited targets only allow the
7384    constant 1 to be stored.  This is an ACQUIRE operation.
7385 
7386    TARGET is an optional place to stick the return value.
7387    MEM is where VAL is stored.  */
7388 
7389 rtx
7390 expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
7391 {
7392   rtx ret;
7393 
7394   /* Try an atomic_exchange first.  */
7395   ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_SYNC_ACQUIRE);
7396   if (ret)
7397     return ret;
7398 
7399   ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
7400 					   MEMMODEL_SYNC_ACQUIRE);
7401   if (ret)
7402     return ret;
7403 
7404   ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7405   if (ret)
7406     return ret;
7407 
7408   /* If there are no other options, try atomic_test_and_set if the value
7409      being stored is 1.  */
7410   if (val == const1_rtx)
7411     ret = maybe_emit_atomic_test_and_set (target, mem, MEMMODEL_SYNC_ACQUIRE);
7412 
7413   return ret;
7414 }
7415 
/* This function expands the atomic test_and_set operation:
   atomically store a boolean TRUE into MEM and return the previous value.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  rtx ret, trueval, subtarget;

  /* First choice: the dedicated atomic_test_and_set pattern.  */
  ret = maybe_emit_atomic_test_and_set (target, mem, model);
  if (ret)
    return ret;

  /* Be binary compatible with non-default settings of trueval, and different
     cpu revisions.  E.g. one revision may have atomic-test-and-set, but
     another only has atomic-exchange.  */
  if (targetm.atomic_test_and_set_trueval == 1)
    {
      trueval = const1_rtx;
      subtarget = target ? target : gen_reg_rtx (mode);
    }
  else
    {
      /* With a non-one trueval, the raw exchange result must be
	 normalized to a boolean below, so exchange into a scratch
	 register rather than TARGET.  */
      trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode);
      subtarget = gen_reg_rtx (mode);
    }

  /* Try the atomic-exchange optab...  */
  ret = maybe_emit_atomic_exchange (subtarget, mem, trueval, model);

  /* ... then an atomic-compare-and-swap loop ... */
  if (!ret)
    ret = maybe_emit_compare_and_swap_exchange_loop (subtarget, mem, trueval);

  /* ... before trying the vaguely defined legacy lock_test_and_set. */
  if (!ret)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, trueval, model);

  /* Recall that the legacy lock_test_and_set optab was allowed to do magic
     things with the value 1.  Thus we try again without trueval.  */
  if (!ret && targetm.atomic_test_and_set_trueval != 1)
    ret = maybe_emit_sync_lock_test_and_set (subtarget, mem, const1_rtx, model);

  /* Failing all else, assume a single threaded environment and simply
     perform the operation.  */
  if (!ret)
    {
      /* If the result is ignored skip the move to target.  */
      if (subtarget != const0_rtx)
        emit_move_insn (subtarget, mem);

      emit_move_insn (mem, trueval);
      ret = subtarget;
    }

  /* Recall that we have to return a boolean value; rectify if trueval
     is not exactly one.  */
  if (targetm.atomic_test_and_set_trueval != 1)
    ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1);

  return ret;
}
7481 
7482 /* This function expands the atomic exchange operation:
7483    atomically store VAL in MEM and return the previous value in MEM.
7484 
7485    MEMMODEL is the memory model variant to use.
7486    TARGET is an optional place to stick the return value.  */
7487 
7488 rtx
7489 expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
7490 {
7491   rtx ret;
7492 
7493   ret = maybe_emit_atomic_exchange (target, mem, val, model);
7494 
7495   /* Next try a compare-and-swap loop for the exchange.  */
7496   if (!ret)
7497     ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
7498 
7499   return ret;
7500 }
7501 
/* This function expands the atomic compare exchange operation:

   *PTARGET_BOOL is an optional place to store the boolean success/failure.
   *PTARGET_OVAL is an optional place to store the old value from memory.
   Both target parameters may be NULL or const0_rtx to indicate that we do
   not care about that return value.  Both target parameters are updated on
   success to the actual location of the corresponding result.

   MEMMODEL is the memory model variant to use.

   The return value of the function is true for success.  */

bool
expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
				rtx mem, rtx expected, rtx desired,
				bool is_weak, enum memmodel succ_model,
				enum memmodel fail_model)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[8];
  enum insn_code icode;
  rtx target_oval, target_bool = NULL_RTX;
  rtx libfunc;

  /* Load expected into a register for the compare and swap.  */
  if (MEM_P (expected))
    expected = copy_to_reg (expected);

  /* Make sure we always have some place to put the return oldval.
     Further, make sure that place is distinct from the input expected,
     just in case we need that path down below.  */
  if (ptarget_oval && *ptarget_oval == const0_rtx)
    ptarget_oval = NULL;

  if (ptarget_oval == NULL
      || (target_oval = *ptarget_oval) == NULL
      || reg_overlap_mentioned_p (expected, target_oval))
    target_oval = gen_reg_rtx (mode);

  /* First choice: the __atomic compare-and-swap pattern, which takes
     both memory models and produces both results directly.  */
  icode = direct_optab_handler (atomic_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      machine_mode bool_mode = insn_data[icode].operand[0].mode;

      if (ptarget_bool && *ptarget_bool == const0_rtx)
	ptarget_bool = NULL;

      /* Make sure we always have a place for the bool operand.  */
      if (ptarget_bool == NULL
	  || (target_bool = *ptarget_bool) == NULL
	  || GET_MODE (target_bool) != bool_mode)
	target_bool = gen_reg_rtx (bool_mode);

      /* Emit the compare_and_swap.  */
      create_output_operand (&ops[0], target_bool, bool_mode);
      create_output_operand (&ops[1], target_oval, mode);
      create_fixed_operand (&ops[2], mem);
      create_input_operand (&ops[3], expected, mode);
      create_input_operand (&ops[4], desired, mode);
      create_integer_operand (&ops[5], is_weak);
      create_integer_operand (&ops[6], succ_model);
      create_integer_operand (&ops[7], fail_model);
      if (maybe_expand_insn (icode, 8, ops))
	{
	  /* Return success/failure.  */
	  target_bool = ops[0].value;
	  target_oval = ops[1].value;
	  goto success;
	}
    }

  /* Otherwise fall back to the original __sync_val_compare_and_swap
     which is always seq-cst.  */
  icode = optab_handler (sync_compare_and_swap_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      rtx cc_reg;

      create_output_operand (&ops[0], target_oval, mode);
      create_fixed_operand (&ops[1], mem);
      create_input_operand (&ops[2], expected, mode);
      create_input_operand (&ops[3], desired, mode);
      if (!maybe_expand_insn (icode, 4, ops))
	return false;

      target_oval = ops[0].value;

      /* If the caller isn't interested in the boolean return value,
	 skip the computation of it.  */
      if (ptarget_bool == NULL)
	goto success;

      /* Otherwise, work out if the compare-and-swap succeeded.  */
      cc_reg = NULL_RTX;
      if (have_insn_for (COMPARE, CCmode))
	note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
      if (cc_reg)
	{
	  /* The pattern set a condition code register; test that for
	     equality rather than recomparing the values.  */
	  target_bool = emit_store_flag_force (target_bool, EQ, cc_reg,
					       const0_rtx, VOIDmode, 0, 1);
	  goto success;
	}
      /* No CC register set; derive the boolean from the old value.  */
      goto success_bool_from_val;
    }

  /* Also check for library support for __sync_val_compare_and_swap.  */
  libfunc = optab_libfunc (sync_compare_and_swap_optab, mode);
  if (libfunc != NULL)
    {
      rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
      rtx target = emit_library_call_value (libfunc, NULL_RTX, LCT_NORMAL,
					    mode, 3, addr, ptr_mode,
					    expected, mode, desired, mode);
      emit_move_insn (target_oval, target);

      /* Compute the boolean return value only if requested.  */
      if (ptarget_bool)
	goto success_bool_from_val;
      else
	goto success;
    }

  /* Failure.  */
  return false;

 success_bool_from_val:
   /* Success means the old value equalled EXPECTED.  */
   target_bool = emit_store_flag_force (target_bool, EQ, target_oval,
					expected, VOIDmode, 1, 1);
 success:
  /* Make sure that the oval output winds up where the caller asked.  */
  if (ptarget_oval)
    *ptarget_oval = target_oval;
  if (ptarget_bool)
    *ptarget_bool = target_bool;
  return true;
}
7638 
7639 /* Generate asm volatile("" : : : "memory") as the memory barrier.  */
7640 
7641 static void
7642 expand_asm_memory_barrier (void)
7643 {
7644   rtx asm_op, clob;
7645 
7646   asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, empty_string, empty_string, 0,
7647 				 rtvec_alloc (0), rtvec_alloc (0),
7648 				 rtvec_alloc (0), UNKNOWN_LOCATION);
7649   MEM_VOLATILE_P (asm_op) = 1;
7650 
7651   clob = gen_rtx_SCRATCH (VOIDmode);
7652   clob = gen_rtx_MEM (BLKmode, clob);
7653   clob = gen_rtx_CLOBBER (VOIDmode, clob);
7654 
7655   emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob)));
7656 }
7657 
/* This routine will either emit the mem_thread_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

#ifndef HAVE_mem_thread_fence
# define HAVE_mem_thread_fence 0
# define gen_mem_thread_fence(x) (gcc_unreachable (), NULL_RTX)
#endif
#ifndef HAVE_memory_barrier
# define HAVE_memory_barrier 0
# define gen_memory_barrier()  (gcc_unreachable (), NULL_RTX)
#endif

void
expand_mem_thread_fence (enum memmodel model)
{
  if (HAVE_mem_thread_fence)
    emit_insn (gen_mem_thread_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      /* A relaxed fence needs nothing; anything stronger tries, in
	 order: the memory_barrier pattern, the __sync_synchronize
	 libcall, and finally a compiler-only asm barrier.  */
      if (HAVE_memory_barrier)
	emit_insn (gen_memory_barrier ());
      else if (synchronize_libfunc != NULL_RTX)
	emit_library_call (synchronize_libfunc, LCT_NORMAL, VOIDmode, 0);
      else
	expand_asm_memory_barrier ();
    }
}
7685 
/* This routine will either emit the mem_signal_fence pattern or issue a
   sync_synchronize to generate a fence for memory model MEMMODEL.  */

#ifndef HAVE_mem_signal_fence
# define HAVE_mem_signal_fence 0
# define gen_mem_signal_fence(x) (gcc_unreachable (), NULL_RTX)
#endif

void
expand_mem_signal_fence (enum memmodel model)
{
  if (HAVE_mem_signal_fence)
    emit_insn (gen_mem_signal_fence (GEN_INT (model)));
  else if (!is_mm_relaxed (model))
    {
      /* By default targets are coherent between a thread and the signal
	 handler running on the same thread.  Thus this really becomes a
	 compiler barrier, in that stores must not be sunk past
	 (or raised above) a given point.  */
      expand_asm_memory_barrier ();
    }
}
7708 
/* This function expands the atomic load operation:
   return the atomically loaded value in MEM.

   MEMMODEL is the memory model variant to use.
   TARGET is an optional place to stick the return value.  */

rtx
expand_atomic_load (rtx target, rtx mem, enum memmodel model)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;

  /* If the target supports the load directly, great.  */
  icode = direct_optab_handler (atomic_load_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];

      create_output_operand (&ops[0], target, mode);
      create_fixed_operand (&ops[1], mem);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	return ops[0].value;
    }

  /* If the size of the object is greater than word size on this target,
     then we assume that a load will not be atomic.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      /* Issue val = compare_and_swap (mem, 0, 0).
	 This may cause the occasional harmless store of 0 when the value is
	 already 0, but it seems to be OK according to the standards guys.  */
      if (expand_atomic_compare_and_swap (NULL, &target, mem, const0_rtx,
					  const0_rtx, false, model, model))
	return target;
      else
      /* Otherwise there is no atomic load, leave the library call.  */
        return NULL_RTX;
    }

  /* Otherwise assume loads are atomic, and emit the proper barriers.  */
  if (!target || target == const0_rtx)
    target = gen_reg_rtx (mode);

  /* For SEQ_CST, emit a barrier before the load.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  emit_move_insn (target, mem);

  /* Emit the appropriate barrier after the load.  */
  expand_mem_thread_fence (model);

  return target;
}
7764 
/* This function expands the atomic store operation:
   Atomically store VAL in MEM.
   MEMMODEL is the memory model variant to use.
   USE_RELEASE is true if __sync_lock_release can be used as a fall back.
   function returns const0_rtx if a pattern was emitted.  */

rtx
expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
  machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  struct expand_operand ops[3];

  /* If the target supports the store directly, great.  */
  icode = direct_optab_handler (atomic_store_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      create_fixed_operand (&ops[0], mem);
      create_input_operand (&ops[1], val, mode);
      create_integer_operand (&ops[2], model);
      if (maybe_expand_insn (icode, 3, ops))
	return const0_rtx;
    }

  /* If using __sync_lock_release is a viable alternative, try it.
     Note that this only stores zero, which is what lock_release does.  */
  if (use_release)
    {
      icode = direct_optab_handler (sync_lock_release_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  create_fixed_operand (&ops[0], mem);
	  create_input_operand (&ops[1], const0_rtx, mode);
	  if (maybe_expand_insn (icode, 2, ops))
	    {
	      /* lock_release is only a release barrier.  */
	      if (is_mm_seq_cst (model))
		expand_mem_thread_fence (model);
	      return const0_rtx;
	    }
	}
    }

  /* If the size of the object is greater than word size on this target,
     a default store will not be atomic.  Try a mem_exchange and throw away
     the result.  If that doesn't work, don't do anything.  */
  if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
    {
      rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
      if (!target)
        target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
      if (target)
        return const0_rtx;
      else
        return NULL_RTX;
    }

  /* Otherwise assume stores are atomic, and emit the proper barriers.  */
  expand_mem_thread_fence (model);

  emit_move_insn (mem, val);

  /* For SEQ_CST, also emit a barrier after the store.  */
  if (is_mm_seq_cst (model))
    expand_mem_thread_fence (model);

  return const0_rtx;
}
7832 
7833 
/* Structure containing the pointers and values required to process the
   various forms of the atomic_fetch_op and atomic_op_fetch builtins.  */

struct atomic_op_functions
{
  direct_optab mem_fetch_before;	/* __atomic fetch_OP optab.  */
  direct_optab mem_fetch_after;		/* __atomic OP_fetch optab.  */
  direct_optab mem_no_result;		/* __atomic optab, result unused.  */
  optab fetch_before;			/* Legacy __sync old_OP optab.  */
  optab fetch_after;			/* Legacy __sync new_OP optab.  */
  direct_optab no_result;		/* Legacy __sync optab, result
					   unused.  */
  enum rtx_code reverse_code;		/* Code which recovers one fetch
					   variant from the other, or
					   UNKNOWN if impossible.  */
};
7847 
7848 
/* Fill in structure pointed to by OP with the various optab entries for an
   operation of type CODE (PLUS, MINUS, XOR, AND, IOR or NOT/nand).  */

static void
get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code)
{
  gcc_assert (op!= NULL);

  /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
     in the source code during compilation, and the optab entries are not
     computable until runtime.  Fill in the values at runtime.  */
  switch (code)
    {
    case PLUS:
      op->mem_fetch_before = atomic_fetch_add_optab;
      op->mem_fetch_after = atomic_add_fetch_optab;
      op->mem_no_result = atomic_add_optab;
      op->fetch_before = sync_old_add_optab;
      op->fetch_after = sync_new_add_optab;
      op->no_result = sync_add_optab;
      op->reverse_code = MINUS;
      break;
    case MINUS:
      op->mem_fetch_before = atomic_fetch_sub_optab;
      op->mem_fetch_after = atomic_sub_fetch_optab;
      op->mem_no_result = atomic_sub_optab;
      op->fetch_before = sync_old_sub_optab;
      op->fetch_after = sync_new_sub_optab;
      op->no_result = sync_sub_optab;
      op->reverse_code = PLUS;
      break;
    case XOR:
      /* XOR is its own inverse, so it is its own reverse code.  */
      op->mem_fetch_before = atomic_fetch_xor_optab;
      op->mem_fetch_after = atomic_xor_fetch_optab;
      op->mem_no_result = atomic_xor_optab;
      op->fetch_before = sync_old_xor_optab;
      op->fetch_after = sync_new_xor_optab;
      op->no_result = sync_xor_optab;
      op->reverse_code = XOR;
      break;
    case AND:
      /* AND is not invertible; no reverse code exists.  */
      op->mem_fetch_before = atomic_fetch_and_optab;
      op->mem_fetch_after = atomic_and_fetch_optab;
      op->mem_no_result = atomic_and_optab;
      op->fetch_before = sync_old_and_optab;
      op->fetch_after = sync_new_and_optab;
      op->no_result = sync_and_optab;
      op->reverse_code = UNKNOWN;
      break;
    case IOR:
      /* IOR is not invertible; no reverse code exists.  */
      op->mem_fetch_before = atomic_fetch_or_optab;
      op->mem_fetch_after = atomic_or_fetch_optab;
      op->mem_no_result = atomic_or_optab;
      op->fetch_before = sync_old_ior_optab;
      op->fetch_after = sync_new_ior_optab;
      op->no_result = sync_ior_optab;
      op->reverse_code = UNKNOWN;
      break;
    case NOT:
      /* NOT here stands for the nand operations.  */
      op->mem_fetch_before = atomic_fetch_nand_optab;
      op->mem_fetch_after = atomic_nand_fetch_optab;
      op->mem_no_result = atomic_nand_optab;
      op->fetch_before = sync_old_nand_optab;
      op->fetch_after = sync_new_nand_optab;
      op->no_result = sync_nand_optab;
      op->reverse_code = UNKNOWN;
      break;
    default:
      gcc_unreachable ();
    }
}
7920 
7921 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
7922    using memory order MODEL.  If AFTER is true the operation needs to return
7923    the value of *MEM after the operation, otherwise the previous value.
7924    TARGET is an optional place to place the result.  The result is unused if
7925    it is const0_rtx.
7926    Return the result if there is a better sequence, otherwise NULL_RTX.  */
7927 
7928 static rtx
7929 maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
7930 			 enum memmodel model, bool after)
7931 {
7932   /* If the value is prefetched, or not used, it may be possible to replace
7933      the sequence with a native exchange operation.  */
7934   if (!after || target == const0_rtx)
7935     {
7936       /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m).  */
7937       if (code == AND && val == const0_rtx)
7938         {
7939 	  if (target == const0_rtx)
7940 	    target = gen_reg_rtx (GET_MODE (mem));
7941 	  return maybe_emit_atomic_exchange (target, mem, val, model);
7942 	}
7943 
7944       /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m).  */
7945       if (code == IOR && val == constm1_rtx)
7946         {
7947 	  if (target == const0_rtx)
7948 	    target = gen_reg_rtx (GET_MODE (mem));
7949 	  return maybe_emit_atomic_exchange (target, mem, val, model);
7950 	}
7951     }
7952 
7953   return NULL_RTX;
7954 }
7955 
/* Try to emit an instruction for a specific operation variation.
   OPTAB contains the OP functions.
   TARGET is an optional place to return the result. const0_rtx means unused.
   MEM is the memory location to operate on.
   VAL is the value to use in the operation.
   USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
   MODEL is the memory model, if used.
   AFTER is true if the returned result is the value after the operation.  */

static rtx
maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
	       rtx val, bool use_memmodel, enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct expand_operand ops[4];
  enum insn_code icode;
  int op_counter = 0;
  int num_ops;

  /* Check to see if there is a result returned.  */
  if (target == const0_rtx)
    {
      /* No-result forms: operands are (mem, val[, model]).  */
      if (use_memmodel)
        {
	  icode = direct_optab_handler (optab->mem_no_result, mode);
	  create_integer_operand (&ops[2], model);
	  num_ops = 3;
	}
      else
        {
	  icode = direct_optab_handler (optab->no_result, mode);
	  num_ops = 2;
	}
    }
  /* Otherwise, we need to generate a result.  */
  else
    {
      /* Result forms: operands are (result, mem, val[, model]).  */
      if (use_memmodel)
        {
	  icode = direct_optab_handler (after ? optab->mem_fetch_after
					: optab->mem_fetch_before, mode);
	  create_integer_operand (&ops[3], model);
	  num_ops = 4;
	}
      else
	{
	  icode = optab_handler (after ? optab->fetch_after
				 : optab->fetch_before, mode);
	  num_ops = 3;
	}
      create_output_operand (&ops[op_counter++], target, mode);
    }
  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  create_fixed_operand (&ops[op_counter++], mem);
  /* VAL may have been promoted to a wider mode.  Shrink it if so.  */
  create_convert_operand_to (&ops[op_counter++], val, mode, true);

  if (maybe_expand_insn (icode, num_ops, ops))
    return (target == const0_rtx ? const0_rtx : ops[0].value);

  return NULL_RTX;
}
8020 
8021 
/* This function expands an atomic fetch_OP or OP_fetch operation:
   TARGET is an option place to stick the return value.  const0_rtx indicates
   the result is unused.
   atomically fetch MEM, perform the operation with VAL and return it to MEM.
   CODE is the operation being performed (OP)
   MEMMODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).

   This function will *only* generate instructions if there is a direct
   optab. No compare and swap loops or libcalls will be generated. */

static rtx
expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val,
				    enum rtx_code code, enum memmodel model,
				    bool after)
{
  machine_mode mode = GET_MODE (mem);
  struct atomic_op_functions optab;
  rtx result;
  bool unused_result = (target == const0_rtx);

  get_atomic_op_for_code (&optab, code);

  /* Check to see if there are any better instructions.  */
  result = maybe_optimize_fetch_op (target, mem, val, code, model, after);
  if (result)
    return result;

  /* Check for the case where the result isn't used and try those patterns.  */
  if (unused_result)
    {
      /* Try the memory model variant first.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, true);
      if (result)
        return result;

      /* Next try the old style without a memory model.  */
      result = maybe_emit_op (&optab, target, mem, val, false, model, true);
      if (result)
        return result;

      /* There is no no-result pattern, so try patterns with a result.  */
      target = NULL_RTX;
    }

  /* Try the __atomic version.  */
  result = maybe_emit_op (&optab, target, mem, val, true, model, after);
  if (result)
    return result;

  /* Try the older __sync version.  */
  result = maybe_emit_op (&optab, target, mem, val, false, model, after);
  if (result)
    return result;

  /* If the fetch value can be calculated from the other variation of fetch,
     try that operation.  */
  if (after || unused_result || optab.reverse_code != UNKNOWN)
    {
      /* Try the __atomic version, then the older __sync version.  */
      result = maybe_emit_op (&optab, target, mem, val, true, model, !after);
      if (!result)
	result = maybe_emit_op (&optab, target, mem, val, false, model, !after);

      if (result)
	{
	  /* If the result isn't used, no need to do compensation code.  */
	  if (unused_result)
	    return result;

	  /* Issue compensation code.  Fetch_after  == fetch_before OP val.
	     Fetch_before == after REVERSE_OP val.  */
	  if (!after)
	    code = optab.reverse_code;
	  if (code == NOT)
	    {
	      /* NAND has no reverse, but x NAND v can be recomputed from
		 the other fetch result as ~((result AND v)).  */
	      result = expand_simple_binop (mode, AND, result, val, NULL_RTX,
					    true, OPTAB_LIB_WIDEN);
	      result = expand_simple_unop (mode, NOT, result, target, true);
	    }
	  else
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}
    }

  /* No direct opcode can be generated.  */
  return NULL_RTX;
}
8113 
8114 
8115 
8116 /* This function expands an atomic fetch_OP or OP_fetch operation:
8117    TARGET is an option place to stick the return value.  const0_rtx indicates
8118    the result is unused.
8119    atomically fetch MEM, perform the operation with VAL and return it to MEM.
8120    CODE is the operation being performed (OP)
8121    MEMMODEL is the memory model variant to use.
8122    AFTER is true to return the result of the operation (OP_fetch).
8123    AFTER is false to return the value before the operation (fetch_OP).  */
rtx
expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code,
			enum memmodel model, bool after)
{
  machine_mode mode = GET_MODE (mem);
  rtx result;
  /* const0_rtx as the target is the caller's way of saying the result
     is not needed; that enables cheaper expansions below.  */
  bool unused_result = (target == const0_rtx);

  /* First attempt: expand the operation directly, with no fallback.  */
  result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model,
					       after);

  if (result)
    return result;

  /* Add/sub can be implemented by doing the reverse operation with -(val).  */
  if (code == PLUS || code == MINUS)
    {
      rtx tmp;
      enum rtx_code reverse = (code == PLUS ? MINUS : PLUS);

      /* Build the negation and the reversed operation in a pending
	 sequence so the insns can be discarded if the attempt fails.  */
      start_sequence ();
      tmp = expand_simple_unop (mode, NEG, val, NULL_RTX, true);
      result = expand_atomic_fetch_op_no_fallback (target, mem, tmp, reverse,
						   model, after);
      if (result)
	{
	  /* PLUS worked so emit the insns and return.  */
	  tmp = get_insns ();
	  end_sequence ();
	  emit_insn (tmp);
          return result;
	}

      /* PLUS did not work, so throw away the negation code and continue.  */
      end_sequence ();
    }

  /* Try the __sync libcalls only if we can't do compare-and-swap inline.  */
  if (!can_compare_and_swap_p (mode, false))
    {
      rtx libfunc;
      bool fixup = false;
      enum rtx_code orig_code = code;
      struct atomic_op_functions optab;

      get_atomic_op_for_code (&optab, code);
      libfunc = optab_libfunc (after ? optab.fetch_after
			       : optab.fetch_before, mode);
      /* If the wanted fetch_before/fetch_after variant has no libcall,
	 the other variant can still be used when either the result is
	 unused or the wanted value can be reconstructed afterwards by
	 applying the (possibly reversed) operation to the result.  */
      if (libfunc == NULL
	  && (after || unused_result || optab.reverse_code != UNKNOWN))
	{
	  fixup = true;
	  if (!after)
	    code = optab.reverse_code;
	  libfunc = optab_libfunc (after ? optab.fetch_before
				   : optab.fetch_after, mode);
	}
      if (libfunc != NULL)
	{
	  rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
	  result = emit_library_call_value (libfunc, NULL, LCT_NORMAL, mode,
					    2, addr, ptr_mode, val, mode);

	  /* Reconstruct the requested variant's value by redoing the
	     operation on the libcall's return value.  */
	  if (!unused_result && fixup)
	    result = expand_simple_binop (mode, code, result, val, target,
					  true, OPTAB_LIB_WIDEN);
	  return result;
	}

      /* We need the original code for any further attempts.  */
      code = orig_code;
    }

  /* If nothing else has succeeded, default to a compare and swap loop.  */
  if (can_compare_and_swap_p (mode, true))
    {
      rtx_insn *insn;
      /* T0 is the value the loop loads from MEM on each iteration.  */
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      /* If the result is used, get a register for it.  */
      if (!unused_result)
        {
	  if (!target || !register_operand (target, mode))
	    target = gen_reg_rtx (mode);
	  /* If fetch_before, copy the value now.  */
	  if (!after)
	    emit_move_insn (target, t0);
	}
      else
        target = const0_rtx;

      /* Compute T1, the value to store back.  NOT here really means
	 NAND, so it is open-coded as AND followed by bitwise NOT.  */
      t1 = t0;
      if (code == NOT)
        {
	  t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
				    true, OPTAB_LIB_WIDEN);
	  t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
	}
      else
	t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, true,
				  OPTAB_LIB_WIDEN);

      /* For after, copy the value now.  */
      if (!unused_result && after)
        emit_move_insn (target, t1);
      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
        return target;
    }

  /* All expansion strategies failed.  */
  return NULL_RTX;
}
8240 
8241 /* Return true if OPERAND is suitable for operand number OPNO of
8242    instruction ICODE.  */
8243 
8244 bool
8245 insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand)
8246 {
8247   return (!insn_data[(int) icode].operand[opno].predicate
8248 	  || (insn_data[(int) icode].operand[opno].predicate
8249 	      (operand, insn_data[(int) icode].operand[opno].mode)));
8250 }
8251 
8252 /* TARGET is a target of a multiword operation that we are going to
8253    implement as a series of word-mode operations.  Return true if
8254    TARGET is suitable for this purpose.  */
8255 
8256 bool
8257 valid_multiword_target_p (rtx target)
8258 {
8259   machine_mode mode;
8260   int i;
8261 
8262   mode = GET_MODE (target);
8263   for (i = 0; i < GET_MODE_SIZE (mode); i += UNITS_PER_WORD)
8264     if (!validate_subreg (word_mode, mode, target, i))
8265       return false;
8266   return true;
8267 }
8268 
8269 /* Like maybe_legitimize_operand, but do not change the code of the
8270    current rtx value.  */
8271 
8272 static bool
8273 maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
8274 				    struct expand_operand *op)
8275 {
8276   /* See if the operand matches in its current form.  */
8277   if (insn_operand_matches (icode, opno, op->value))
8278     return true;
8279 
8280   /* If the operand is a memory whose address has no side effects,
8281      try forcing the address into a non-virtual pseudo register.
8282      The check for side effects is important because copy_to_mode_reg
8283      cannot handle things like auto-modified addresses.  */
8284   if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value))
8285     {
8286       rtx addr, mem;
8287 
8288       mem = op->value;
8289       addr = XEXP (mem, 0);
8290       if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER)
8291 	  && !side_effects_p (addr))
8292 	{
8293 	  rtx_insn *last;
8294 	  machine_mode mode;
8295 
8296 	  last = get_last_insn ();
8297 	  mode = get_address_mode (mem);
8298 	  mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr));
8299 	  if (insn_operand_matches (icode, opno, mem))
8300 	    {
8301 	      op->value = mem;
8302 	      return true;
8303 	    }
8304 	  delete_insns_since (last);
8305 	}
8306     }
8307 
8308   return false;
8309 }
8310 
/* Try to make OP match operand OPNO of instruction ICODE.  Return true
   on success, storing the new operand value back in OP.  */

static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
			  struct expand_operand *op)
{
  machine_mode mode, imode;
  bool old_volatile_ok, result;

  mode = op->mode;
  switch (op->type)
    {
    case EXPAND_FIXED:
      /* The caller insists on this exact rtx.  Temporarily allow
	 volatile MEMs so the predicate does not reject them.  */
      old_volatile_ok = volatile_ok;
      volatile_ok = true;
      result = maybe_legitimize_operand_same_code (icode, opno, op);
      volatile_ok = old_volatile_ok;
      return result;

    case EXPAND_OUTPUT:
      gcc_assert (mode != VOIDmode);
      if (op->value
	  && op->value != const0_rtx
	  && GET_MODE (op->value) == mode
	  && maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      /* The suggested destination is unsuitable; write to a fresh
	 pseudo instead.  */
      op->value = gen_reg_rtx (mode);
      break;

    case EXPAND_INPUT:
    input:
      gcc_assert (mode != VOIDmode);
      gcc_assert (GET_MODE (op->value) == VOIDmode
		  || GET_MODE (op->value) == mode);
      if (maybe_legitimize_operand_same_code (icode, opno, op))
	return true;

      /* Last resort: copy the value into a fresh register.  */
      op->value = copy_to_mode_reg (mode, op->value);
      break;

    case EXPAND_CONVERT_TO:
      /* Convert the value to the mode the caller asked for, then treat
	 it as a normal input.  */
      gcc_assert (mode != VOIDmode);
      op->value = convert_to_mode (mode, op->value, op->unsigned_p);
      goto input;

    case EXPAND_CONVERT_FROM:
      if (GET_MODE (op->value) != VOIDmode)
	mode = GET_MODE (op->value);
      else
	/* The caller must tell us what mode this value has.  */
	gcc_assert (mode != VOIDmode);

      /* Convert to the mode the instruction's operand wants, if it
	 differs from the value's own mode.  */
      imode = insn_data[(int) icode].operand[opno].mode;
      if (imode != VOIDmode && imode != mode)
	{
	  op->value = convert_modes (imode, mode, op->value, op->unsigned_p);
	  mode = imode;
	}
      goto input;

    case EXPAND_ADDRESS:
      gcc_assert (mode != VOIDmode);
      op->value = convert_memory_address (mode, op->value);
      goto input;

    case EXPAND_INTEGER:
      /* Accept the constant directly when the operand's predicate
	 allows it in the instruction's own mode; otherwise fall
	 through to the generic predicate check at the end.  */
      mode = insn_data[(int) icode].operand[opno].mode;
      if (mode != VOIDmode && const_int_operand (op->value, mode))
	goto input;
      break;
    }
  return insn_operand_matches (icode, opno, op->value);
}
8386 
8387 /* Make OP describe an input operand that should have the same value
8388    as VALUE, after any mode conversion that the target might request.
8389    TYPE is the type of VALUE.  */
8390 
8391 void
8392 create_convert_operand_from_type (struct expand_operand *op,
8393 				  rtx value, tree type)
8394 {
8395   create_convert_operand_from (op, value, TYPE_MODE (type),
8396 			       TYPE_UNSIGNED (type));
8397 }
8398 
8399 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
8400    of instruction ICODE.  Return true on success, leaving the new operand
8401    values in the OPS themselves.  Emit no code on failure.  */
8402 
8403 bool
8404 maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
8405 			   unsigned int nops, struct expand_operand *ops)
8406 {
8407   rtx_insn *last;
8408   unsigned int i;
8409 
8410   last = get_last_insn ();
8411   for (i = 0; i < nops; i++)
8412     if (!maybe_legitimize_operand (icode, opno + i, &ops[i]))
8413       {
8414 	delete_insns_since (last);
8415 	return false;
8416       }
8417   return true;
8418 }
8419 
/* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
   as its operands.  Return the instruction pattern on success,
   and emit any necessary set-up code.  Return null and emit no
   code on failure.  */

rtx
maybe_gen_insn (enum insn_code icode, unsigned int nops,
		struct expand_operand *ops)
{
  /* The caller must supply exactly as many operands as the pattern's
     generator function expects.  */
  gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
  if (!maybe_legitimize_operands (icode, 0, nops, ops))
    return NULL_RTX;

  /* Generator functions take a fixed number of rtx arguments, so
     dispatch on NOPS to call GEN_FCN with the right arity.  */
  switch (nops)
    {
    case 1:
      return GEN_FCN (icode) (ops[0].value);
    case 2:
      return GEN_FCN (icode) (ops[0].value, ops[1].value);
    case 3:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value);
    case 4:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value);
    case 5:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value);
    case 6:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value);
    case 7:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value);
    case 8:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value, ops[7].value);
    case 9:
      return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value,
			      ops[3].value, ops[4].value, ops[5].value,
			      ops[6].value, ops[7].value, ops[8].value);
    }
  /* Patterns with more than 9 generator arguments are not supported.  */
  gcc_unreachable ();
}
8465 
8466 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
8467    as its operands.  Return true on success and emit no code on failure.  */
8468 
8469 bool
8470 maybe_expand_insn (enum insn_code icode, unsigned int nops,
8471 		   struct expand_operand *ops)
8472 {
8473   rtx pat = maybe_gen_insn (icode, nops, ops);
8474   if (pat)
8475     {
8476       emit_insn (pat);
8477       return true;
8478     }
8479   return false;
8480 }
8481 
8482 /* Like maybe_expand_insn, but for jumps.  */
8483 
8484 bool
8485 maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
8486 			struct expand_operand *ops)
8487 {
8488   rtx pat = maybe_gen_insn (icode, nops, ops);
8489   if (pat)
8490     {
8491       emit_jump_insn (pat);
8492       return true;
8493     }
8494   return false;
8495 }
8496 
8497 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
8498    as its operands.  */
8499 
8500 void
8501 expand_insn (enum insn_code icode, unsigned int nops,
8502 	     struct expand_operand *ops)
8503 {
8504   if (!maybe_expand_insn (icode, nops, ops))
8505     gcc_unreachable ();
8506 }
8507 
8508 /* Like expand_insn, but for jumps.  */
8509 
8510 void
8511 expand_jump_insn (enum insn_code icode, unsigned int nops,
8512 		  struct expand_operand *ops)
8513 {
8514   if (!maybe_expand_jump_insn (icode, nops, ops))
8515     gcc_unreachable ();
8516 }
8517 
8518 /* Reduce conditional compilation elsewhere.  */
8519 #ifndef HAVE_insv
8520 #define HAVE_insv	0
8521 #define CODE_FOR_insv	CODE_FOR_nothing
8522 #endif
8523 #ifndef HAVE_extv
8524 #define HAVE_extv	0
8525 #define CODE_FOR_extv	CODE_FOR_nothing
8526 #endif
8527 #ifndef HAVE_extzv
8528 #define HAVE_extzv	0
8529 #define CODE_FOR_extzv	CODE_FOR_nothing
8530 #endif
8531 
8532 /* Enumerates the possible types of structure operand to an
8533    extraction_insn.  */
8534 enum extraction_type { ET_unaligned_mem, ET_reg };
8535 
8536 /* Check whether insv, extv or extzv pattern ICODE can be used for an
8537    insertion or extraction of type TYPE on a structure of mode MODE.
8538    Return true if so and fill in *INSN accordingly.  STRUCT_OP is the
8539    operand number of the structure (the first sign_extract or zero_extract
8540    operand) and FIELD_OP is the operand number of the field (the other
8541    side of the set from the sign_extract or zero_extract).  */
8542 
8543 static bool
8544 get_traditional_extraction_insn (extraction_insn *insn,
8545 				 enum extraction_type type,
8546 				 machine_mode mode,
8547 				 enum insn_code icode,
8548 				 int struct_op, int field_op)
8549 {
8550   const struct insn_data_d *data = &insn_data[icode];
8551 
8552   machine_mode struct_mode = data->operand[struct_op].mode;
8553   if (struct_mode == VOIDmode)
8554     struct_mode = word_mode;
8555   if (mode != struct_mode)
8556     return false;
8557 
8558   machine_mode field_mode = data->operand[field_op].mode;
8559   if (field_mode == VOIDmode)
8560     field_mode = word_mode;
8561 
8562   machine_mode pos_mode = data->operand[struct_op + 2].mode;
8563   if (pos_mode == VOIDmode)
8564     pos_mode = word_mode;
8565 
8566   insn->icode = icode;
8567   insn->field_mode = field_mode;
8568   insn->struct_mode = (type == ET_unaligned_mem ? byte_mode : struct_mode);
8569   insn->pos_mode = pos_mode;
8570   return true;
8571 }
8572 
8573 /* Return true if an optab exists to perform an insertion or extraction
8574    of type TYPE in mode MODE.  Describe the instruction in *INSN if so.
8575 
8576    REG_OPTAB is the optab to use for register structures and
8577    MISALIGN_OPTAB is the optab to use for misaligned memory structures.
8578    POS_OP is the operand number of the bit position.  */
8579 
8580 static bool
8581 get_optab_extraction_insn (struct extraction_insn *insn,
8582 			   enum extraction_type type,
8583 			   machine_mode mode, direct_optab reg_optab,
8584 			   direct_optab misalign_optab, int pos_op)
8585 {
8586   direct_optab optab = (type == ET_unaligned_mem ? misalign_optab : reg_optab);
8587   enum insn_code icode = direct_optab_handler (optab, mode);
8588   if (icode == CODE_FOR_nothing)
8589     return false;
8590 
8591   const struct insn_data_d *data = &insn_data[icode];
8592 
8593   insn->icode = icode;
8594   insn->field_mode = mode;
8595   insn->struct_mode = (type == ET_unaligned_mem ? BLKmode : mode);
8596   insn->pos_mode = data->operand[pos_op].mode;
8597   if (insn->pos_mode == VOIDmode)
8598     insn->pos_mode = word_mode;
8599   return true;
8600 }
8601 
/* Return true if an instruction exists to perform an insertion or
   extraction (PATTERN says which) of type TYPE in mode MODE.
   Describe the instruction in *INSN if so.  */

static bool
get_extraction_insn (extraction_insn *insn,
		     enum extraction_pattern pattern,
		     enum extraction_type type,
		     machine_mode mode)
{
  switch (pattern)
    {
    case EP_insv:
      /* Prefer the target's named insv pattern, falling back to the
	 insv/insvmisalign optabs.  For insv the structure is operand 0
	 and the field operand 3; the optab's position operand is 2.  */
      if (HAVE_insv
	  && get_traditional_extraction_insn (insn, type, mode,
					      CODE_FOR_insv, 0, 3))
	return true;
      return get_optab_extraction_insn (insn, type, mode, insv_optab,
					insvmisalign_optab, 2);

    case EP_extv:
      /* Sign extraction: the structure is operand 1, the field
	 operand 0; the optab's position operand is 3.  */
      if (HAVE_extv
	  && get_traditional_extraction_insn (insn, type, mode,
					      CODE_FOR_extv, 1, 0))
	return true;
      return get_optab_extraction_insn (insn, type, mode, extv_optab,
					extvmisalign_optab, 3);

    case EP_extzv:
      /* Zero extraction: same operand layout as extv.  */
      if (HAVE_extzv
	  && get_traditional_extraction_insn (insn, type, mode,
					      CODE_FOR_extzv, 1, 0))
	return true;
      return get_optab_extraction_insn (insn, type, mode, extzv_optab,
					extzvmisalign_optab, 3);

    default:
      gcc_unreachable ();
    }
}
8642 
/* Return true if an instruction exists to access a field of mode
   FIELDMODE in a structure that has STRUCT_BITS significant bits.
   Describe the "best" such instruction in *INSN if so.  PATTERN and
   TYPE describe the type of insertion or extraction we want to perform.

   For an insertion, the number of significant structure bits includes
   all bits of the target.  For an extraction, it need only include the
   most significant bit of the field.  Larger widths are acceptable
   in both cases.  */

static bool
get_best_extraction_insn (extraction_insn *insn,
			  enum extraction_pattern pattern,
			  enum extraction_type type,
			  unsigned HOST_WIDE_INT struct_bits,
			  machine_mode field_mode)
{
  /* Start from the narrowest integer mode that covers STRUCT_BITS and
     widen until some instruction exists.  */
  machine_mode mode = smallest_mode_for_size (struct_bits, MODE_INT);
  while (mode != VOIDmode)
    {
      if (get_extraction_insn (insn, pattern, type, mode))
	{
	  /* An instruction exists.  Keep widening while the structure
	     mode still fits within FIELD_MODE but the instruction's
	     field mode cannot be truncated to FIELD_MODE for free;
	     a wider variant may avoid the truncation.
	     NOTE(review): the return value of the inner call is
	     ignored, so *INSN retains the last successful description
	     if a wider mode has no instruction — presumably intended;
	     confirm against later GCC revisions.  */
	  while (mode != VOIDmode
		 && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (field_mode)
		 && !TRULY_NOOP_TRUNCATION_MODES_P (insn->field_mode,
						    field_mode))
	    {
	      get_extraction_insn (insn, pattern, type, mode);
	      mode = GET_MODE_WIDER_MODE (mode);
	    }
	  return true;
	}
      mode = GET_MODE_WIDER_MODE (mode);
    }
  return false;
}
8679 
8680 /* Return true if an instruction exists to access a field of mode
8681    FIELDMODE in a register structure that has STRUCT_BITS significant bits.
8682    Describe the "best" such instruction in *INSN if so.  PATTERN describes
8683    the type of insertion or extraction we want to perform.
8684 
8685    For an insertion, the number of significant structure bits includes
8686    all bits of the target.  For an extraction, it need only include the
8687    most significant bit of the field.  Larger widths are acceptable
8688    in both cases.  */
8689 
8690 bool
8691 get_best_reg_extraction_insn (extraction_insn *insn,
8692 			      enum extraction_pattern pattern,
8693 			      unsigned HOST_WIDE_INT struct_bits,
8694 			      machine_mode field_mode)
8695 {
8696   return get_best_extraction_insn (insn, pattern, ET_reg, struct_bits,
8697 				   field_mode);
8698 }
8699 
8700 /* Return true if an instruction exists to access a field of BITSIZE
8701    bits starting BITNUM bits into a memory structure.  Describe the
8702    "best" such instruction in *INSN if so.  PATTERN describes the type
8703    of insertion or extraction we want to perform and FIELDMODE is the
8704    natural mode of the extracted field.
8705 
8706    The instructions considered here only access bytes that overlap
8707    the bitfield; they do not touch any surrounding bytes.  */
8708 
8709 bool
8710 get_best_mem_extraction_insn (extraction_insn *insn,
8711 			      enum extraction_pattern pattern,
8712 			      HOST_WIDE_INT bitsize, HOST_WIDE_INT bitnum,
8713 			      machine_mode field_mode)
8714 {
8715   unsigned HOST_WIDE_INT struct_bits = (bitnum % BITS_PER_UNIT
8716 					+ bitsize
8717 					+ BITS_PER_UNIT - 1);
8718   struct_bits -= struct_bits % BITS_PER_UNIT;
8719   return get_best_extraction_insn (insn, pattern, ET_unaligned_mem,
8720 				   struct_bits, field_mode);
8721 }
8722 
8723 /* Determine whether "1 << x" is relatively cheap in word_mode.  */
8724 
8725 bool
8726 lshift_cheap_p (bool speed_p)
8727 {
8728   /* FIXME: This should be made target dependent via this "this_target"
8729      mechanism, similar to e.g. can_copy_init_p in gcse.c.  */
8730   static bool init[2] = { false, false };
8731   static bool cheap[2] = { true, true };
8732 
8733   /* If the targer has no lshift in word_mode, the operation will most
8734      probably not be cheap.  ??? Does GCC even work for such targets?  */
8735   if (optab_handler (ashl_optab, word_mode) == CODE_FOR_nothing)
8736     return false;
8737 
8738   if (!init[speed_p])
8739     {
8740       rtx reg = gen_raw_REG (word_mode, 10000);
8741       int cost = set_src_cost (gen_rtx_ASHIFT (word_mode, const1_rtx, reg),
8742 			       speed_p);
8743       cheap[speed_p] = cost < COSTS_N_INSNS (3);
8744       init[speed_p] = true;
8745     }
8746 
8747   return cheap[speed_p];
8748 }
8749 
8750 #include "gt-optabs.h"
8751