1 /* Convert tree expression to rtl instructions, for GNU compiler.
2    Copyright (C) 1988-2022 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "ssa.h"
32 #include "optabs.h"
33 #include "expmed.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "cgraph.h"
38 #include "diagnostic.h"
39 #include "alias.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
42 #include "attribs.h"
43 #include "varasm.h"
44 #include "except.h"
45 #include "insn-attr.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "calls.h"
49 #include "stmt.h"
50 /* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
51 #include "expr.h"
52 #include "optabs-tree.h"
53 #include "libfuncs.h"
54 #include "reload.h"
55 #include "langhooks.h"
56 #include "common/common-target.h"
57 #include "tree-dfa.h"
58 #include "tree-ssa-live.h"
59 #include "tree-outof-ssa.h"
60 #include "tree-ssa-address.h"
61 #include "builtins.h"
62 #include "ccmp.h"
63 #include "gimple-fold.h"
64 #include "rtx-vector-builder.h"
65 #include "tree-pretty-print.h"
66 #include "flags.h"
67 
68 
69 /* If this is nonzero, we do not bother generating VOLATILE
70    around volatile memory references, and we are willing to
71    output indirect addresses.  If cse is to follow, we reject
72    indirect addresses so a useful potential cse is generated;
73    if it is used only once, instruction combination will produce
74    the same indirect address eventually.  */
75 int cse_not_expected;
76 
77 static bool block_move_libcall_safe_for_call_parm (void);
78 static bool emit_block_move_via_pattern (rtx, rtx, rtx, unsigned, unsigned,
79 					 HOST_WIDE_INT, unsigned HOST_WIDE_INT,
80 					 unsigned HOST_WIDE_INT,
81 					 unsigned HOST_WIDE_INT, bool);
82 static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
83 static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
84 static rtx_insn *compress_float_constant (rtx, rtx);
85 static rtx get_subtarget (rtx);
86 static void store_constructor (tree, rtx, int, poly_int64, bool);
87 static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
88 			machine_mode, tree, alias_set_type, bool, bool);
89 
90 static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
91 
92 static int is_aligning_offset (const_tree, const_tree);
93 static rtx reduce_to_bit_field_precision (rtx, rtx, tree);
94 static rtx do_store_flag (sepops, rtx, machine_mode);
95 #ifdef PUSH_ROUNDING
96 static void emit_single_push_insn (machine_mode, rtx, tree);
97 #endif
98 static void do_tablejump (rtx, machine_mode, rtx, rtx, rtx,
99 			  profile_probability);
100 static rtx const_vector_from_tree (tree);
101 static tree tree_expr_size (const_tree);
102 static HOST_WIDE_INT int_expr_size (tree);
103 static void convert_mode_scalar (rtx, rtx, int);
104 
105 
106 /* This is run to set up which modes can be used
107    directly in memory and to initialize the block move optab.  It is run
108    at the beginning of compilation and when the target is reinitialized.  */
109 
110 void
111 init_expr_target (void)
112 {
113   rtx pat;
114   int num_clobbers;
115   rtx mem, mem1;
116   rtx reg;
117 
118   /* Try indexing by frame ptr and try by stack ptr.
119      It is known that on the Convex the stack ptr isn't a valid index.
120      With luck, one or the other is valid on any machine.  */
121   mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);
122   mem1 = gen_rtx_MEM (word_mode, frame_pointer_rtx);
123 
124   /* A scratch register we can modify in-place below to avoid
125      useless RTL allocations.  */
126   reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
127 
128   rtx_insn *insn = as_a<rtx_insn *> (rtx_alloc (INSN));
129   pat = gen_rtx_SET (NULL_RTX, NULL_RTX);
130   PATTERN (insn) = pat;
131 
132   for (machine_mode mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
133        mode = (machine_mode) ((int) mode + 1))
134     {
135       int regno;
136 
137       direct_load[(int) mode] = direct_store[(int) mode] = 0;
138       PUT_MODE (mem, mode);
139       PUT_MODE (mem1, mode);
140 
141       /* See if there is some register that can be used in this mode and
142 	 directly loaded or stored from memory.  */
143 
144       if (mode != VOIDmode && mode != BLKmode)
145 	for (regno = 0; regno < FIRST_PSEUDO_REGISTER
146 	     && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
147 	     regno++)
148 	  {
149 	    if (!targetm.hard_regno_mode_ok (regno, mode))
150 	      continue;
151 
152 	    set_mode_and_regno (reg, mode, regno);
153 
154 	    SET_SRC (pat) = mem;
155 	    SET_DEST (pat) = reg;
156 	    if (recog (pat, insn, &num_clobbers) >= 0)
157 	      direct_load[(int) mode] = 1;
158 
159 	    SET_SRC (pat) = mem1;
160 	    SET_DEST (pat) = reg;
161 	    if (recog (pat, insn, &num_clobbers) >= 0)
162 	      direct_load[(int) mode] = 1;
163 
164 	    SET_SRC (pat) = reg;
165 	    SET_DEST (pat) = mem;
166 	    if (recog (pat, insn, &num_clobbers) >= 0)
167 	      direct_store[(int) mode] = 1;
168 
169 	    SET_SRC (pat) = reg;
170 	    SET_DEST (pat) = mem1;
171 	    if (recog (pat, insn, &num_clobbers) >= 0)
172 	      direct_store[(int) mode] = 1;
173 	  }
174     }
175 
176   mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));
177 
178   opt_scalar_float_mode mode_iter;
179   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_FLOAT)
180     {
181       scalar_float_mode mode = mode_iter.require ();
182       scalar_float_mode srcmode;
183       FOR_EACH_MODE_UNTIL (srcmode, mode)
184 	{
185 	  enum insn_code ic;
186 
187 	  ic = can_extend_p (mode, srcmode, 0);
188 	  if (ic == CODE_FOR_nothing)
189 	    continue;
190 
191 	  PUT_MODE (mem, srcmode);
192 
193 	  if (insn_operand_matches (ic, 1, mem))
194 	    float_extend_from_mem[mode][srcmode] = true;
195 	}
196     }
197 }
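/* Editorial note (not part of the original source): the direct_load[],
   direct_store[] and float_extend_from_mem[] tables filled in above are
   simple caches of recog results.  They are consulted later in this
   file, e.g. convert_mode_scalar checks direct_load[] before deciding
   whether a MEM operand can be referenced directly in a narrowing
   conversion or must first be forced into a register.  */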
198 
199 /* This is run at the start of compiling a function.  */
200 
201 void
202 init_expr (void)
203 {
204   memset (&crtl->expr, 0, sizeof (crtl->expr));
205 }
206 
207 /* Copy data from FROM to TO, where the machine modes are not the same.
208    Both modes may be integer, or both may be floating, or both may be
209    fixed-point.
210    UNSIGNEDP should be nonzero if FROM is an unsigned type.
211    This causes zero-extension instead of sign-extension.  */
212 
213 void
214 convert_move (rtx to, rtx from, int unsignedp)
215 {
216   machine_mode to_mode = GET_MODE (to);
217   machine_mode from_mode = GET_MODE (from);
218 
219   gcc_assert (to_mode != BLKmode);
220   gcc_assert (from_mode != BLKmode);
221 
222   /* If the source and destination are already the same, then there's
223      nothing to do.  */
224   if (to == from)
225     return;
226 
227   /* If FROM is a SUBREG that indicates that we have already done at least
228      the required extension, strip it.  We don't handle such SUBREGs as
229      TO here.  */
230 
231   scalar_int_mode to_int_mode;
232   if (GET_CODE (from) == SUBREG
233       && SUBREG_PROMOTED_VAR_P (from)
234       && is_a <scalar_int_mode> (to_mode, &to_int_mode)
235       && (GET_MODE_PRECISION (subreg_promoted_mode (from))
236 	  >= GET_MODE_PRECISION (to_int_mode))
237       && SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
238     {
239       scalar_int_mode int_orig_mode;
240       scalar_int_mode int_inner_mode;
241       machine_mode orig_mode = GET_MODE (from);
242 
243       from = gen_lowpart (to_int_mode, SUBREG_REG (from));
244       from_mode = to_int_mode;
245 
246       /* Preserve SUBREG_PROMOTED_VAR_P if the new mode is wider than
247 	 the original mode, but narrower than the inner mode.  */
248       if (GET_CODE (from) == SUBREG
249 	  && is_a <scalar_int_mode> (orig_mode, &int_orig_mode)
250 	  && GET_MODE_PRECISION (to_int_mode)
251 	     > GET_MODE_PRECISION (int_orig_mode)
252 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (from)),
253 				     &int_inner_mode)
254 	  && GET_MODE_PRECISION (int_inner_mode)
255 	     > GET_MODE_PRECISION (to_int_mode))
256 	{
257 	  SUBREG_PROMOTED_VAR_P (from) = 1;
258 	  SUBREG_PROMOTED_SET (from, unsignedp);
259 	}
260     }
261 
262   gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));
263 
264   if (to_mode == from_mode
265       || (from_mode == VOIDmode && CONSTANT_P (from)))
266     {
267       emit_move_insn (to, from);
268       return;
269     }
270 
271   if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
272     {
273       if (GET_MODE_UNIT_PRECISION (to_mode)
274 	  > GET_MODE_UNIT_PRECISION (from_mode))
275 	{
276 	  optab op = unsignedp ? zext_optab : sext_optab;
277 	  insn_code icode = convert_optab_handler (op, to_mode, from_mode);
278 	  if (icode != CODE_FOR_nothing)
279 	    {
280 	      emit_unop_insn (icode, to, from,
281 			      unsignedp ? ZERO_EXTEND : SIGN_EXTEND);
282 	      return;
283 	    }
284 	}
285 
286       if (GET_MODE_UNIT_PRECISION (to_mode)
287 	  < GET_MODE_UNIT_PRECISION (from_mode))
288 	{
289 	  insn_code icode = convert_optab_handler (trunc_optab,
290 						   to_mode, from_mode);
291 	  if (icode != CODE_FOR_nothing)
292 	    {
293 	      emit_unop_insn (icode, to, from, TRUNCATE);
294 	      return;
295 	    }
296 	}
297 
298       gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
299 			    GET_MODE_BITSIZE (to_mode)));
300 
301       if (VECTOR_MODE_P (to_mode))
302 	from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
303       else
304 	to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0);
305 
306       emit_move_insn (to, from);
307       return;
308     }
309 
310   if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
311     {
312       convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
313       convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
314       return;
315     }
316 
317   convert_mode_scalar (to, from, unsignedp);
318 }
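/* Illustrative usage sketch (editorial, not part of the original source):
   widening an SImode value into a fresh DImode register with zero
   extension might look like

     rtx src = ...;			/* an SImode rtx */
     rtx dst = gen_reg_rtx (DImode);
     convert_move (dst, src, 1);	/* 1 = treat SRC as unsigned */

   For scalar modes the real work, choosing between an extend insn, a
   truncation, a libcall (for floats) or a multiword sequence, is done
   by convert_mode_scalar below.  */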
319 
320 /* Like convert_move, but deals only with scalar modes.  */
321 
322 static void
323 convert_mode_scalar (rtx to, rtx from, int unsignedp)
324 {
325   /* Both modes should be scalar types.  */
326   scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
327   scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
328   bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
329   bool from_real = SCALAR_FLOAT_MODE_P (from_mode);
330   enum insn_code code;
331   rtx libcall;
332 
333   gcc_assert (to_real == from_real);
334 
335   /* rtx code for making an equivalent value.  */
336   enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
337 			      : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));
338 
339   if (to_real)
340     {
341       rtx value;
342       rtx_insn *insns;
343       convert_optab tab;
344 
345       gcc_assert ((GET_MODE_PRECISION (from_mode)
346 		   != GET_MODE_PRECISION (to_mode))
347 		  || (DECIMAL_FLOAT_MODE_P (from_mode)
348 		      != DECIMAL_FLOAT_MODE_P (to_mode)));
349 
350       if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
351 	/* Conversion between decimal float and binary float, same size.  */
352 	tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
353       else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
354 	tab = sext_optab;
355       else
356 	tab = trunc_optab;
357 
358       /* Try converting directly if the insn is supported.  */
359 
360       code = convert_optab_handler (tab, to_mode, from_mode);
361       if (code != CODE_FOR_nothing)
362 	{
363 	  emit_unop_insn (code, to, from,
364 			  tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
365 	  return;
366 	}
367 
368       /* Otherwise use a libcall.  */
369       libcall = convert_optab_libfunc (tab, to_mode, from_mode);
370 
371       /* Is this conversion implemented yet?  */
372       gcc_assert (libcall);
373 
374       start_sequence ();
375       value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode,
376 				       from, from_mode);
377       insns = get_insns ();
378       end_sequence ();
379       emit_libcall_block (insns, to, value,
380 			  tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
381 								       from)
382 			  : gen_rtx_FLOAT_EXTEND (to_mode, from));
383       return;
384     }
385 
386   /* Handle pointer conversion.  */			/* SPEE 900220.  */
387   /* If the target has a converter from FROM_MODE to TO_MODE, use it.  */
388   {
389     convert_optab ctab;
390 
391     if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
392       ctab = trunc_optab;
393     else if (unsignedp)
394       ctab = zext_optab;
395     else
396       ctab = sext_optab;
397 
398     if (convert_optab_handler (ctab, to_mode, from_mode)
399 	!= CODE_FOR_nothing)
400       {
401 	emit_unop_insn (convert_optab_handler (ctab, to_mode, from_mode),
402 			to, from, UNKNOWN);
403 	return;
404       }
405   }
406 
407   /* Targets are expected to provide conversion insns between PxImode and
408      xImode for all MODE_PARTIAL_INT modes they use, but no others.  */
409   if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
410     {
411       scalar_int_mode full_mode
412 	= smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));
413 
414       gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
415 		  != CODE_FOR_nothing);
416 
417       if (full_mode != from_mode)
418 	from = convert_to_mode (full_mode, from, unsignedp);
419       emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
420 		      to, from, UNKNOWN);
421       return;
422     }
423   if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
424     {
425       rtx new_from;
426       scalar_int_mode full_mode
427 	= smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
428       convert_optab ctab = unsignedp ? zext_optab : sext_optab;
429       enum insn_code icode;
430 
431       icode = convert_optab_handler (ctab, full_mode, from_mode);
432       gcc_assert (icode != CODE_FOR_nothing);
433 
434       if (to_mode == full_mode)
435 	{
436 	  emit_unop_insn (icode, to, from, UNKNOWN);
437 	  return;
438 	}
439 
440       new_from = gen_reg_rtx (full_mode);
441       emit_unop_insn (icode, new_from, from, UNKNOWN);
442 
443       /* else proceed to integer conversions below.  */
444       from_mode = full_mode;
445       from = new_from;
446     }
447 
448    /* Make sure both are fixed-point modes or both are not.  */
449    gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode) ==
450 	       ALL_SCALAR_FIXED_POINT_MODE_P (to_mode));
451    if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode))
452     {
453       /* If we widen from_mode to to_mode and they are in the same class,
454 	 we won't saturate the result.
455 	 Otherwise, always saturate the result to play safe.  */
456       if (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
457 	  && GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
458 	expand_fixed_convert (to, from, 0, 0);
459       else
460 	expand_fixed_convert (to, from, 0, 1);
461       return;
462     }
463 
464   /* Now both modes are integers.  */
465 
466   /* Handle expanding beyond a word.  */
467   if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
468       && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
469     {
470       rtx_insn *insns;
471       rtx lowpart;
472       rtx fill_value;
473       rtx lowfrom;
474       int i;
475       scalar_mode lowpart_mode;
476       int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);
477 
478       /* Try converting directly if the insn is supported.  */
479       if ((code = can_extend_p (to_mode, from_mode, unsignedp))
480 	  != CODE_FOR_nothing)
481 	{
482 	  /* If FROM is a SUBREG, put it into a register.  Do this
483 	     so that we always generate the same set of insns for
484 	     better cse'ing; if an intermediate assignment occurred,
485 	     we won't be doing the operation directly on the SUBREG.  */
486 	  if (optimize > 0 && GET_CODE (from) == SUBREG)
487 	    from = force_reg (from_mode, from);
488 	  emit_unop_insn (code, to, from, equiv_code);
489 	  return;
490 	}
491       /* Next, try converting via full word.  */
492       else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
493 	       && ((code = can_extend_p (to_mode, word_mode, unsignedp))
494 		   != CODE_FOR_nothing))
495 	{
496 	  rtx word_to = gen_reg_rtx (word_mode);
497 	  if (REG_P (to))
498 	    {
499 	      if (reg_overlap_mentioned_p (to, from))
500 		from = force_reg (from_mode, from);
501 	      emit_clobber (to);
502 	    }
503 	  convert_move (word_to, from, unsignedp);
504 	  emit_unop_insn (code, to, word_to, equiv_code);
505 	  return;
506 	}
507 
508       /* No special multiword conversion insn; do it by hand.  */
509       start_sequence ();
510 
511       /* Since we will turn this into a no conflict block, we must ensure
512          the source does not overlap the target, so force the source into an
513          isolated register when it might.  Likewise for any MEM input, since the
514          conversion sequence might require several references to it and we
515          must ensure we're getting the same value every time.  */
516 
517       if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
518 	from = force_reg (from_mode, from);
519 
520       /* Get a copy of FROM widened to a word, if necessary.  */
521       if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
522 	lowpart_mode = word_mode;
523       else
524 	lowpart_mode = from_mode;
525 
526       lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);
527 
528       lowpart = gen_lowpart (lowpart_mode, to);
529       emit_move_insn (lowpart, lowfrom);
530 
531       /* Compute the value to put in each remaining word.  */
532       if (unsignedp)
533 	fill_value = const0_rtx;
534       else
535 	fill_value = emit_store_flag_force (gen_reg_rtx (word_mode),
536 					    LT, lowfrom, const0_rtx,
537 					    lowpart_mode, 0, -1);
538 
539       /* Fill the remaining words.  */
540       for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
541 	{
542 	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
543 	  rtx subword = operand_subword (to, index, 1, to_mode);
544 
545 	  gcc_assert (subword);
546 
547 	  if (fill_value != subword)
548 	    emit_move_insn (subword, fill_value);
549 	}
550 
551       insns = get_insns ();
552       end_sequence ();
553 
554       emit_insn (insns);
555       return;
556     }
557 
558   /* Truncating multi-word to a word or less.  */
559   if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD
560       && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD)
561     {
562       if (!((MEM_P (from)
563 	     && ! MEM_VOLATILE_P (from)
564 	     && direct_load[(int) to_mode]
565 	     && ! mode_dependent_address_p (XEXP (from, 0),
566 					    MEM_ADDR_SPACE (from)))
567 	    || REG_P (from)
568 	    || GET_CODE (from) == SUBREG))
569 	from = force_reg (from_mode, from);
570       convert_move (to, gen_lowpart (word_mode, from), 0);
571       return;
572     }
573 
574   /* Now follow all the conversions between integers
575      no more than a word long.  */
576 
577   /* For truncation, usually we can just refer to FROM in a narrower mode.  */
578   if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
579       && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode))
580     {
581       if (!((MEM_P (from)
582 	     && ! MEM_VOLATILE_P (from)
583 	     && direct_load[(int) to_mode]
584 	     && ! mode_dependent_address_p (XEXP (from, 0),
585 					    MEM_ADDR_SPACE (from)))
586 	    || REG_P (from)
587 	    || GET_CODE (from) == SUBREG))
588 	from = force_reg (from_mode, from);
589       if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER
590 	  && !targetm.hard_regno_mode_ok (REGNO (from), to_mode))
591 	from = copy_to_reg (from);
592       emit_move_insn (to, gen_lowpart (to_mode, from));
593       return;
594     }
595 
596   /* Handle extension.  */
597   if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode))
598     {
599       /* Convert directly if that works.  */
600       if ((code = can_extend_p (to_mode, from_mode, unsignedp))
601 	  != CODE_FOR_nothing)
602 	{
603 	  emit_unop_insn (code, to, from, equiv_code);
604 	  return;
605 	}
606       else
607 	{
608 	  rtx tmp;
609 	  int shift_amount;
610 
611 	  /* Search for a mode to convert via.  */
612 	  opt_scalar_mode intermediate_iter;
613 	  FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
614 	    {
615 	      scalar_mode intermediate = intermediate_iter.require ();
616 	      if (((can_extend_p (to_mode, intermediate, unsignedp)
617 		    != CODE_FOR_nothing)
618 		   || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
619 		       && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
620 							 intermediate)))
621 		  && (can_extend_p (intermediate, from_mode, unsignedp)
622 		      != CODE_FOR_nothing))
623 		{
624 		  convert_move (to, convert_to_mode (intermediate, from,
625 						     unsignedp), unsignedp);
626 		  return;
627 		}
628 	    }
629 
630 	  /* No suitable intermediate mode.
631 	     Generate what we need with	shifts.  */
632 	  shift_amount = (GET_MODE_PRECISION (to_mode)
633 			  - GET_MODE_PRECISION (from_mode));
634 	  from = gen_lowpart (to_mode, force_reg (from_mode, from));
635 	  tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
636 			      to, unsignedp);
637 	  tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount,
638 			      to, unsignedp);
639 	  if (tmp != to)
640 	    emit_move_insn (to, tmp);
641 	  return;
642 	}
643     }
644 
645   /* Support special truncate insns for certain modes.  */
646   if (convert_optab_handler (trunc_optab, to_mode,
647 			     from_mode) != CODE_FOR_nothing)
648     {
649       emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode),
650 		      to, from, UNKNOWN);
651       return;
652     }
653 
654   /* Handle truncation of volatile memrefs, and so on;
655      the things that couldn't be truncated directly,
656      and for which there was no special instruction.
657 
658      ??? Code above formerly short-circuited this, for most integer
659      mode pairs, with a force_reg in from_mode followed by a recursive
660      call to this routine.  Appears always to have been wrong.  */
661   if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode))
662     {
663       rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
664       emit_move_insn (to, temp);
665       return;
666     }
667 
668   /* Mode combination is not recognized.  */
669   gcc_unreachable ();
670 }
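/* Editorial example (hedged): on a 32-bit target that provides no
   SImode -> DImode extension pattern, a signed SImode -> DImode
   conversion ends up in the multiword branch above: the low word is
   stored through gen_lowpart, fill_value is computed by
   emit_store_flag_force as -1 when the low part is negative and 0
   otherwise, and the remaining high word is filled with that value,
   all emitted as one sequence.  */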
671 
672 /* Return an rtx for a value that would result
673    from converting X to mode MODE.
674    Both X and MODE may be floating, or both integer.
675    UNSIGNEDP is nonzero if X is an unsigned value.
676    This can be done by referring to a part of X in place
677    or by copying to a new temporary with conversion.  */
678 
679 rtx
680 convert_to_mode (machine_mode mode, rtx x, int unsignedp)
681 {
682   return convert_modes (mode, VOIDmode, x, unsignedp);
683 }
684 
685 /* Return an rtx for a value that would result
686    from converting X from mode OLDMODE to mode MODE.
687    Both modes may be floating, or both integer.
688    UNSIGNEDP is nonzero if X is an unsigned value.
689 
690    This can be done by referring to a part of X in place
691    or by copying to a new temporary with conversion.
692 
693    You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode.  */
694 
695 rtx
696 convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
697 {
698   rtx temp;
699   scalar_int_mode int_mode;
700 
701   /* If FROM is a SUBREG that indicates that we have already done at least
702      the required extension, strip it.  */
703 
704   if (GET_CODE (x) == SUBREG
705       && SUBREG_PROMOTED_VAR_P (x)
706       && is_a <scalar_int_mode> (mode, &int_mode)
707       && (GET_MODE_PRECISION (subreg_promoted_mode (x))
708 	  >= GET_MODE_PRECISION (int_mode))
709       && SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
710     {
711       scalar_int_mode int_orig_mode;
712       scalar_int_mode int_inner_mode;
713       machine_mode orig_mode = GET_MODE (x);
714       x = gen_lowpart (int_mode, SUBREG_REG (x));
715 
716       /* Preserve SUBREG_PROMOTED_VAR_P if the new mode is wider than
717 	 the original mode, but narrower than the inner mode.  */
718       if (GET_CODE (x) == SUBREG
719 	  && is_a <scalar_int_mode> (orig_mode, &int_orig_mode)
720 	  && GET_MODE_PRECISION (int_mode)
721 	     > GET_MODE_PRECISION (int_orig_mode)
722 	  && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)),
723 				     &int_inner_mode)
724 	  && GET_MODE_PRECISION (int_inner_mode)
725 	     > GET_MODE_PRECISION (int_mode))
726 	{
727 	  SUBREG_PROMOTED_VAR_P (x) = 1;
728 	  SUBREG_PROMOTED_SET (x, unsignedp);
729 	}
730     }
731 
732   if (GET_MODE (x) != VOIDmode)
733     oldmode = GET_MODE (x);
734 
735   if (mode == oldmode)
736     return x;
737 
738   if (CONST_SCALAR_INT_P (x)
739       && is_a <scalar_int_mode> (mode, &int_mode))
740     {
741       /* If the caller did not tell us the old mode, then there is not
742 	 much to do with respect to canonicalization.  We have to
743 	 assume that all the bits are significant.  */
744       if (!is_a <scalar_int_mode> (oldmode))
745 	oldmode = MAX_MODE_INT;
746       wide_int w = wide_int::from (rtx_mode_t (x, oldmode),
747 				   GET_MODE_PRECISION (int_mode),
748 				   unsignedp ? UNSIGNED : SIGNED);
749       return immed_wide_int_const (w, int_mode);
750     }
751 
752   /* We can do this with a gen_lowpart if both desired and current modes
753      are integer, and this is either a constant integer, a register, or a
754      non-volatile MEM. */
755   scalar_int_mode int_oldmode;
756   if (is_int_mode (mode, &int_mode)
757       && is_int_mode (oldmode, &int_oldmode)
758       && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
759       && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
760 	  || CONST_POLY_INT_P (x)
761           || (REG_P (x)
762               && (!HARD_REGISTER_P (x)
763 		  || targetm.hard_regno_mode_ok (REGNO (x), int_mode))
764               && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x)))))
765    return gen_lowpart (int_mode, x);
766 
767   /* Converting an integer constant into MODE is always equivalent to a
768      subreg operation.  */
769   if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
770     {
771       gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
772 			    GET_MODE_BITSIZE (oldmode)));
773       return simplify_gen_subreg (mode, x, oldmode, 0);
774     }
775 
776   temp = gen_reg_rtx (mode);
777   convert_move (temp, x, unsignedp);
778   return temp;
779 }
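/* Editorial note (not part of the original source): convert_to_mode is
   just convert_modes with OLDMODE == VOIDmode.  For integer constants
   the conversion is folded immediately by the wide_int path above, e.g.

     rtx x = convert_modes (HImode, SImode, GEN_INT (70000), 1);

   should yield (const_int 4464), i.e. 70000 reduced to HImode's 16-bit
   precision, without emitting any instructions.  */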
780 
781 /* Return the largest alignment we can use for doing a move (or store)
782    of MAX_PIECES.  ALIGN is the largest alignment we could use.  */
783 
784 static unsigned int
785 alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
786 {
787   scalar_int_mode tmode
788     = int_mode_for_size (max_pieces * BITS_PER_UNIT, 0).require ();
789 
790   if (align >= GET_MODE_ALIGNMENT (tmode))
791     align = GET_MODE_ALIGNMENT (tmode);
792   else
793     {
794       scalar_int_mode xmode = NARROWEST_INT_MODE;
795       opt_scalar_int_mode mode_iter;
796       FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
797 	{
798 	  tmode = mode_iter.require ();
799 	  if (GET_MODE_SIZE (tmode) > max_pieces
800 	      || targetm.slow_unaligned_access (tmode, align))
801 	    break;
802 	  xmode = tmode;
803 	}
804 
805       align = MAX (align, GET_MODE_ALIGNMENT (xmode));
806     }
807 
808   return align;
809 }
810 
811 /* Return the widest QI vector, if QI_VECTOR is true, or integer mode
812    that is narrower than SIZE bytes.  */
813 
814 static fixed_size_mode
815 widest_fixed_size_mode_for_size (unsigned int size, bool qi_vector)
816 {
817   fixed_size_mode result = NARROWEST_INT_MODE;
818 
819   gcc_checking_assert (size > 1);
820 
821   /* Use QI vector only if size is wider than a WORD.  */
822   if (qi_vector && size > UNITS_PER_WORD)
823     {
824       machine_mode mode;
825       fixed_size_mode candidate;
826       FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
827 	if (is_a<fixed_size_mode> (mode, &candidate)
828 	    && GET_MODE_INNER (candidate) == QImode)
829 	  {
830 	    if (GET_MODE_SIZE (candidate) >= size)
831 	      break;
832 	    if (optab_handler (vec_duplicate_optab, candidate)
833 		!= CODE_FOR_nothing)
834 	      result = candidate;
835 	  }
836 
837       if (result != NARROWEST_INT_MODE)
838 	return result;
839     }
840 
841   opt_scalar_int_mode tmode;
842   FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
843     if (GET_MODE_SIZE (tmode.require ()) < size)
844       result = tmode.require ();
845 
846   return result;
847 }
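/* Editorial example (hedged; assumes a 64-bit target with integer modes
   of 1/2/4/8/16 bytes): widest_fixed_size_mode_for_size (16, false)
   returns DImode, the widest integer mode strictly narrower than 16
   bytes.  With QI_VECTOR true, a QImode vector narrower than 16 bytes
   (V8QI, say) is preferred instead, but only when the target provides
   vec_duplicate for it; otherwise the integer answer is used.  */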
848 
849 /* Determine whether an operation OP on LEN bytes with alignment ALIGN can
850    and should be performed piecewise.  */
851 
852 static bool
853 can_do_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align,
854 		  enum by_pieces_operation op)
855 {
856   return targetm.use_by_pieces_infrastructure_p (len, align, op,
857 						 optimize_insn_for_speed_p ());
858 }
859 
860 /* Determine whether the LEN bytes can be moved by using several move
861    instructions.  Return nonzero if a call to move_by_pieces should
862    succeed.  */
863 
864 bool
865 can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align)
866 {
867   return can_do_by_pieces (len, align, MOVE_BY_PIECES);
868 }
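/* Illustrative usage (editorial): callers typically gate a piecewise
   copy on this predicate, e.g.

     if (can_move_by_pieces (len, align))
       move_by_pieces (dst, src, len, align, RETURN_BEGIN);

   where DST and SRC are BLKmode MEMs and LEN/ALIGN are known at compile
   time; otherwise they usually fall back to a block-move expander
   pattern or a library call.  */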
869 
870 /* Return number of insns required to perform operation OP by pieces
871    for L bytes.  ALIGN (in bits) is maximum alignment we can assume.  */
872 
873 unsigned HOST_WIDE_INT
874 by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align,
875 		  unsigned int max_size, by_pieces_operation op)
876 {
877   unsigned HOST_WIDE_INT n_insns = 0;
878   fixed_size_mode mode;
879 
880   if (targetm.overlap_op_by_pieces_p () && op != COMPARE_BY_PIECES)
881     {
882       /* NB: Round up L and ALIGN to the widest integer mode for
883 	 MAX_SIZE.  */
884       mode = widest_fixed_size_mode_for_size (max_size,
885 					      op == SET_BY_PIECES);
886       if (optab_handler (mov_optab, mode) != CODE_FOR_nothing)
887 	{
888 	  unsigned HOST_WIDE_INT up = ROUND_UP (l, GET_MODE_SIZE (mode));
889 	  if (up > l)
890 	    l = up;
891 	  align = GET_MODE_ALIGNMENT (mode);
892 	}
893     }
894 
895   align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
896 
897   while (max_size > 1 && l > 0)
898     {
899       mode = widest_fixed_size_mode_for_size (max_size,
900 					      op == SET_BY_PIECES);
901       enum insn_code icode;
902 
903       unsigned int modesize = GET_MODE_SIZE (mode);
904 
905       icode = optab_handler (mov_optab, mode);
906       if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
907 	{
908 	  unsigned HOST_WIDE_INT n_pieces = l / modesize;
909 	  l %= modesize;
910 	  switch (op)
911 	    {
912 	    default:
913 	      n_insns += n_pieces;
914 	      break;
915 
916 	    case COMPARE_BY_PIECES:
917 	      int batch = targetm.compare_by_pieces_branch_ratio (mode);
918 	      int batch_ops = 4 * batch - 1;
919 	      unsigned HOST_WIDE_INT full = n_pieces / batch;
920 	      n_insns += full * batch_ops;
921 	      if (n_pieces % batch != 0)
922 		n_insns++;
923 	      break;
924 
925 	    }
926 	}
927       max_size = modesize;
928     }
929 
930   gcc_assert (!l);
931   return n_insns;
932 }
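/* Editorial example (hedged; assumes integer modes of 8/4/2/1 bytes are
   all available and sufficiently aligned, and that the target does not
   allow overlapping pieces): for L = 7 and MAX_SIZE = 9 the loop above
   counts 0 DImode pieces, then 1 SImode, 1 HImode and 1 QImode piece,
   so by_pieces_ninsns returns 3.  */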
933 
934 /* Used when performing piecewise block operations, holds information
935    about one of the memory objects involved.  The member functions
936    can be used to generate code for loading from the object and
937    updating the address when iterating.  */
938 
939 class pieces_addr
940 {
941   /* The object being referenced, a MEM.  Can be NULL_RTX to indicate
942      stack pushes.  */
943   rtx m_obj;
944   /* The address of the object.  Can differ from that seen in the
945      MEM rtx if we copied the address to a register.  */
946   rtx m_addr;
947   /* Nonzero if the address of the object already has autoincrement form;
948      the sign says whether that is an increment or a decrement.  */
949   signed char m_addr_inc;
950   /* Nonzero if we intend to use autoinc without the address already
951      having autoinc form.  We will insert add insns around each memory
952      reference, expecting later passes to form autoinc addressing modes.
953      The only supported options are predecrement and postincrement.  */
954   signed char m_explicit_inc;
955   /* True if we have either of the two possible cases of using
956      autoincrement.  */
957   bool m_auto;
958   /* True if this is an address to be used for load operations rather
959      than stores.  */
960   bool m_is_load;
961 
962   /* Optionally, a function to obtain constants for any given offset into
963      the objects, and data associated with it.  */
964   by_pieces_constfn m_constfn;
965   void *m_cfndata;
966 public:
967   pieces_addr (rtx, bool, by_pieces_constfn, void *);
968   rtx adjust (fixed_size_mode, HOST_WIDE_INT, by_pieces_prev * = nullptr);
969   void increment_address (HOST_WIDE_INT);
970   void maybe_predec (HOST_WIDE_INT);
971   void maybe_postinc (HOST_WIDE_INT);
972   void decide_autoinc (machine_mode, bool, HOST_WIDE_INT);
973   int get_addr_inc ()
974   {
975     return m_addr_inc;
976   }
977 };
978 
979 /* Initialize a pieces_addr structure from an object OBJ.  IS_LOAD is
980    true if the operation to be performed on this object is a load
981    rather than a store.  For stores, OBJ can be NULL, in which case we
982    assume the operation is a stack push.  For loads, the optional
983    CONSTFN and its associated CFNDATA can be used in place of the
984    memory load.  */
985 
986 pieces_addr::pieces_addr (rtx obj, bool is_load, by_pieces_constfn constfn,
987 			  void *cfndata)
988   : m_obj (obj), m_is_load (is_load), m_constfn (constfn), m_cfndata (cfndata)
989 {
990   m_addr_inc = 0;
991   m_auto = false;
992   if (obj)
993     {
994       rtx addr = XEXP (obj, 0);
995       rtx_code code = GET_CODE (addr);
996       m_addr = addr;
997       bool dec = code == PRE_DEC || code == POST_DEC;
998       bool inc = code == PRE_INC || code == POST_INC;
999       m_auto = inc || dec;
1000       if (m_auto)
1001 	m_addr_inc = dec ? -1 : 1;
1002 
1003       /* While we have always looked for these codes here, the code
1004 	 implementing the memory operation has never handled them.
1005 	 Support could be added later if necessary or beneficial.  */
1006       gcc_assert (code != PRE_INC && code != POST_DEC);
1007     }
1008   else
1009     {
1010       m_addr = NULL_RTX;
1011       if (!is_load)
1012 	{
1013 	  m_auto = true;
1014 	  if (STACK_GROWS_DOWNWARD)
1015 	    m_addr_inc = -1;
1016 	  else
1017 	    m_addr_inc = 1;
1018 	}
1019       else
1020 	gcc_assert (constfn != NULL);
1021     }
1022   m_explicit_inc = 0;
1023   if (constfn)
1024     gcc_assert (is_load);
1025 }
1026 
1027 /* Decide whether to use autoinc for an address involved in a memory op.
1028    MODE is the mode of the accesses, REVERSE is true if we've decided to
1029    perform the operation starting from the end, and LEN is the length of
1030    the operation.  Don't override an earlier decision to set m_auto.  */
1031 
1032 void
1033 pieces_addr::decide_autoinc (machine_mode ARG_UNUSED (mode), bool reverse,
1034 			     HOST_WIDE_INT len)
1035 {
1036   if (m_auto || m_obj == NULL_RTX)
1037     return;
1038 
1039   bool use_predec = (m_is_load
1040 		     ? USE_LOAD_PRE_DECREMENT (mode)
1041 		     : USE_STORE_PRE_DECREMENT (mode));
1042   bool use_postinc = (m_is_load
1043 		      ? USE_LOAD_POST_INCREMENT (mode)
1044 		      : USE_STORE_POST_INCREMENT (mode));
1045   machine_mode addr_mode = get_address_mode (m_obj);
1046 
1047   if (use_predec && reverse)
1048     {
1049       m_addr = copy_to_mode_reg (addr_mode,
1050 				 plus_constant (addr_mode,
1051 						m_addr, len));
1052       m_auto = true;
1053       m_explicit_inc = -1;
1054     }
1055   else if (use_postinc && !reverse)
1056     {
1057       m_addr = copy_to_mode_reg (addr_mode, m_addr);
1058       m_auto = true;
1059       m_explicit_inc = 1;
1060     }
1061   else if (CONSTANT_P (m_addr))
1062     m_addr = copy_to_mode_reg (addr_mode, m_addr);
1063 }
1064 
1065 /* Adjust the address to refer to the data at OFFSET in MODE.  If we
1066    are using autoincrement for this address, we don't add the offset,
1067    but we still modify the MEM's properties.  */
1068 
1069 rtx
1070 pieces_addr::adjust (fixed_size_mode mode, HOST_WIDE_INT offset,
1071 		     by_pieces_prev *prev)
1072 {
1073   if (m_constfn)
1074     /* Pass the previous data to m_constfn.  */
1075     return m_constfn (m_cfndata, prev, offset, mode);
1076   if (m_obj == NULL_RTX)
1077     return NULL_RTX;
1078   if (m_auto)
1079     return adjust_automodify_address (m_obj, mode, m_addr, offset);
1080   else
1081     return adjust_address (m_obj, mode, offset);
1082 }
1083 
1084 /* Emit an add instruction to increment the address by SIZE.  */
1085 
1086 void
1087 pieces_addr::increment_address (HOST_WIDE_INT size)
1088 {
1089   rtx amount = gen_int_mode (size, GET_MODE (m_addr));
1090   emit_insn (gen_add2_insn (m_addr, amount));
1091 }
1092 
1093 /* If we are supposed to decrement the address after each access, emit code
1094    to do so now.  Increment by SIZE (which should have the correct sign
1095    already).  */
1096 
1097 void
1098 pieces_addr::maybe_predec (HOST_WIDE_INT size)
1099 {
1100   if (m_explicit_inc >= 0)
1101     return;
1102   gcc_assert (HAVE_PRE_DECREMENT);
1103   increment_address (size);
1104 }
1105 
1106 /* If we are supposed to increment the address after each access, emit code
1107    to do so now.  Increment by SIZE.  */
1108 
1109 void
1110 pieces_addr::maybe_postinc (HOST_WIDE_INT size)
1111 {
1112   if (m_explicit_inc <= 0)
1113     return;
1114   gcc_assert (HAVE_POST_INCREMENT);
1115   increment_address (size);
1116 }
1117 
1118 /* This structure is used by the op-by-pieces infrastructure to describe
1119    the operation to be performed.  */
1120 
1121 class op_by_pieces_d
1122 {
1123  private:
1124   fixed_size_mode get_usable_mode (fixed_size_mode, unsigned int);
1125   fixed_size_mode smallest_fixed_size_mode_for_size (unsigned int);
1126 
1127  protected:
1128   pieces_addr m_to, m_from;
1129   /* Make m_len read-only so that smallest_fixed_size_mode_for_size can
1130      use it to check the valid mode size.  */
1131   const unsigned HOST_WIDE_INT m_len;
1132   HOST_WIDE_INT m_offset;
1133   unsigned int m_align;
1134   unsigned int m_max_size;
1135   bool m_reverse;
1136   /* True if this is a stack push.  */
1137   bool m_push;
1138   /* True if targetm.overlap_op_by_pieces_p () returns true.  */
1139   bool m_overlap_op_by_pieces;
1140   /* True if QI vector mode can be used.  */
1141   bool m_qi_vector_mode;
1142 
1143   /* Virtual functions, overridden by derived classes for the specific
1144      operation.  */
1145   virtual void generate (rtx, rtx, machine_mode) = 0;
1146   virtual bool prepare_mode (machine_mode, unsigned int) = 0;
1147   virtual void finish_mode (machine_mode)
1148   {
1149   }
1150 
1151  public:
1152   op_by_pieces_d (unsigned int, rtx, bool, rtx, bool, by_pieces_constfn,
1153 		  void *, unsigned HOST_WIDE_INT, unsigned int, bool,
1154 		  bool = false);
1155   void run ();
1156 };
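/* Editorial note (not part of the original source): op_by_pieces_d is a
   template-method style base class.  The derived classes below
   (move_by_pieces_d, store_by_pieces_d, compare_by_pieces_d) only
   supply prepare_mode, generate and optionally finish_mode; the shared
   run () loop selects the access modes, steps the two pieces_addr
   members through the data and calls generate once per piece.  */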
1157 
1158 /* The constructor for an op_by_pieces_d structure.  We require two
1159    objects named TO and FROM, which are identified as loads or stores
1160    by TO_LOAD and FROM_LOAD.  If FROM is a load, the optional FROM_CFN
1161    and its associated FROM_CFN_DATA can be used to replace loads with
1162    constant values.  MAX_PIECES describes the maximum number of bytes
1163    at a time which can be moved efficiently.  LEN describes the length
1164    of the operation.  */
1165 
1166 op_by_pieces_d::op_by_pieces_d (unsigned int max_pieces, rtx to,
1167 				bool to_load, rtx from, bool from_load,
1168 				by_pieces_constfn from_cfn,
1169 				void *from_cfn_data,
1170 				unsigned HOST_WIDE_INT len,
1171 				unsigned int align, bool push,
1172 				bool qi_vector_mode)
1173   : m_to (to, to_load, NULL, NULL),
1174     m_from (from, from_load, from_cfn, from_cfn_data),
1175     m_len (len), m_max_size (max_pieces + 1),
1176     m_push (push), m_qi_vector_mode (qi_vector_mode)
1177 {
1178   int toi = m_to.get_addr_inc ();
1179   int fromi = m_from.get_addr_inc ();
1180   if (toi >= 0 && fromi >= 0)
1181     m_reverse = false;
1182   else if (toi <= 0 && fromi <= 0)
1183     m_reverse = true;
1184   else
1185     gcc_unreachable ();
1186 
1187   m_offset = m_reverse ? len : 0;
1188   align = MIN (to ? MEM_ALIGN (to) : align,
1189 	       from ? MEM_ALIGN (from) : align);
1190 
1191   /* If copying requires more than two move insns,
1192      copy addresses to registers (to make displacements shorter)
1193      and use post-increment if available.  */
1194   if (by_pieces_ninsns (len, align, m_max_size, MOVE_BY_PIECES) > 2)
1195     {
1196       /* Find the widest mode this operation might use.  */
1197       fixed_size_mode mode
1198 	= widest_fixed_size_mode_for_size (m_max_size,
1199 					   m_qi_vector_mode);
1200 
1201       m_from.decide_autoinc (mode, m_reverse, len);
1202       m_to.decide_autoinc (mode, m_reverse, len);
1203     }
1204 
1205   align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
1206   m_align = align;
1207 
1208   m_overlap_op_by_pieces = targetm.overlap_op_by_pieces_p ();
1209 }
1210 
1211 /* This function returns the largest usable integer mode for LEN bytes
1212    whose size is no bigger than the size of MODE.  */
1213 
1214 fixed_size_mode
1215 op_by_pieces_d::get_usable_mode (fixed_size_mode mode, unsigned int len)
1216 {
1217   unsigned int size;
1218   do
1219     {
1220       size = GET_MODE_SIZE (mode);
1221       if (len >= size && prepare_mode (mode, m_align))
1222 	break;
1223       /* widest_fixed_size_mode_for_size checks SIZE > 1.  */
1224       mode = widest_fixed_size_mode_for_size (size, m_qi_vector_mode);
1225     }
1226   while (1);
1227   return mode;
1228 }
1229 
1230 /* Return the smallest integer or QI vector mode that is not narrower
1231    than SIZE bytes.  */
1232 
1233 fixed_size_mode
1234 op_by_pieces_d::smallest_fixed_size_mode_for_size (unsigned int size)
1235 {
1236   /* Use QI vector only for > size of WORD.  */
1237   if (m_qi_vector_mode && size > UNITS_PER_WORD)
1238     {
1239       machine_mode mode;
1240       fixed_size_mode candidate;
1241       FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT)
1242 	if (is_a<fixed_size_mode> (mode, &candidate)
1243 	    && GET_MODE_INNER (candidate) == QImode)
1244 	  {
1245 	    /* Don't return a mode wider than M_LEN.  */
1246 	    if (GET_MODE_SIZE (candidate) > m_len)
1247 	      break;
1248 
1249 	    if (GET_MODE_SIZE (candidate) >= size
1250 		&& (optab_handler (vec_duplicate_optab, candidate)
1251 		    != CODE_FOR_nothing))
1252 	      return candidate;
1253 	  }
1254     }
1255 
1256   return smallest_int_mode_for_size (size * BITS_PER_UNIT);
1257 }
1258 
1259 /* This function contains the main loop used for expanding a block
1260    operation.  First move what we can in the largest integer mode,
1261    then go to successively smaller modes.  For every access, call
1262    the derived class's generate hook with the two operands and the mode.  */
1263 
1264 void
1265 op_by_pieces_d::run ()
1266 {
1267   if (m_len == 0)
1268     return;
1269 
1270   unsigned HOST_WIDE_INT length = m_len;
1271 
1272   /* widest_fixed_size_mode_for_size checks M_MAX_SIZE > 1.  */
1273   fixed_size_mode mode
1274     = widest_fixed_size_mode_for_size (m_max_size, m_qi_vector_mode);
1275   mode = get_usable_mode (mode, length);
1276 
1277   by_pieces_prev to_prev = { nullptr, mode };
1278   by_pieces_prev from_prev = { nullptr, mode };
1279 
1280   do
1281     {
1282       unsigned int size = GET_MODE_SIZE (mode);
1283       rtx to1 = NULL_RTX, from1;
1284 
1285       while (length >= size)
1286 	{
1287 	  if (m_reverse)
1288 	    m_offset -= size;
1289 
1290 	  to1 = m_to.adjust (mode, m_offset, &to_prev);
1291 	  to_prev.data = to1;
1292 	  to_prev.mode = mode;
1293 	  from1 = m_from.adjust (mode, m_offset, &from_prev);
1294 	  from_prev.data = from1;
1295 	  from_prev.mode = mode;
1296 
1297 	  m_to.maybe_predec (-(HOST_WIDE_INT)size);
1298 	  m_from.maybe_predec (-(HOST_WIDE_INT)size);
1299 
1300 	  generate (to1, from1, mode);
1301 
1302 	  m_to.maybe_postinc (size);
1303 	  m_from.maybe_postinc (size);
1304 
1305 	  if (!m_reverse)
1306 	    m_offset += size;
1307 
1308 	  length -= size;
1309 	}
1310 
1311       finish_mode (mode);
1312 
1313       if (length == 0)
1314 	return;
1315 
1316       if (!m_push && m_overlap_op_by_pieces)
1317 	{
1318 	  /* NB: Generate overlapping operations if it is not a stack
1319 	     push since stack push must not overlap.  Get the smallest
1320 	     fixed size mode for M_LEN bytes.  */
1321 	  mode = smallest_fixed_size_mode_for_size (length);
1322 	  mode = get_usable_mode (mode, GET_MODE_SIZE (mode));
1323 	  int gap = GET_MODE_SIZE (mode) - length;
1324 	  if (gap > 0)
1325 	    {
1326 	      /* If size of MODE > M_LEN, generate the last operation
1327 		 in MODE for the remaining bytes with overlapping memory
1328 		 from the previous operation.  */
1329 	      if (m_reverse)
1330 		m_offset += gap;
1331 	      else
1332 		m_offset -= gap;
1333 	      length += gap;
1334 	    }
1335 	}
1336       else
1337 	{
1338 	  /* widest_fixed_size_mode_for_size checks SIZE > 1.  */
1339 	  mode = widest_fixed_size_mode_for_size (size,
1340 						  m_qi_vector_mode);
1341 	  mode = get_usable_mode (mode, length);
1342 	}
1343     }
1344   while (1);
1345 }
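/* Editorial example (hedged; assumes DImode, SImode and QImode accesses
   are all usable at the given alignment, MOVE_MAX_PIECES >= 8 and no
   overlapping pieces): a forward 13-byte copy is expanded by run () as
   one DImode access at offset 0, one SImode access at offset 8 and one
   QImode access at offset 12, the mode shrinking whenever the remaining
   length drops below the current mode size.  */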
1346 
1347 /* Derived class from op_by_pieces_d, providing support for block move
1348    operations.  */
1349 
1350 #ifdef PUSH_ROUNDING
1351 #define PUSHG_P(to)  ((to) == nullptr)
1352 #else
1353 #define PUSHG_P(to)  false
1354 #endif
1355 
1356 class move_by_pieces_d : public op_by_pieces_d
1357 {
1358   insn_gen_fn m_gen_fun;
1359   void generate (rtx, rtx, machine_mode);
1360   bool prepare_mode (machine_mode, unsigned int);
1361 
1362  public:
1363   move_by_pieces_d (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1364 		    unsigned int align)
1365     : op_by_pieces_d (MOVE_MAX_PIECES, to, false, from, true, NULL,
1366 		      NULL, len, align, PUSHG_P (to))
1367   {
1368   }
1369   rtx finish_retmode (memop_ret);
1370 };
1371 
1372 /* Return true if MODE can be used for a set of copies, given an
1373    alignment ALIGN.  Prepare whatever data is necessary for later
1374    calls to generate.  */
1375 
1376 bool
1377 move_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1378 {
1379   insn_code icode = optab_handler (mov_optab, mode);
1380   m_gen_fun = GEN_FCN (icode);
1381   return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1382 }
1383 
1384 /* A callback used when iterating for a move_by_pieces operation.
1385    OP0 and OP1 are the destination and source pieces to be copied in
1386    MODE.  If OP0 is NULL, this means we should generate a push of OP1;
1387    otherwise the move is emitted with the insn gen function selected
1388    earlier by prepare_mode.  */
1389 
1390 void
1391 move_by_pieces_d::generate (rtx op0, rtx op1,
1392 			    machine_mode mode ATTRIBUTE_UNUSED)
1393 {
1394 #ifdef PUSH_ROUNDING
1395   if (op0 == NULL_RTX)
1396     {
1397       emit_single_push_insn (mode, op1, NULL);
1398       return;
1399     }
1400 #endif
1401   emit_insn (m_gen_fun (op0, op1));
1402 }
1403 
1404 /* Perform the final adjustment at the end of a string to obtain the
1405    correct return value for the block operation.
1406    Return value is based on RETMODE argument.  */
1407 
1408 rtx
1409 move_by_pieces_d::finish_retmode (memop_ret retmode)
1410 {
1411   gcc_assert (!m_reverse);
1412   if (retmode == RETURN_END_MINUS_ONE)
1413     {
1414       m_to.maybe_postinc (-1);
1415       --m_offset;
1416     }
1417   return m_to.adjust (QImode, m_offset);
1418 }
1419 
1420 /* Generate several move instructions to copy LEN bytes from block FROM to
1421    block TO.  (These are MEM rtx's with BLKmode).
1422 
1423    If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1424    used to push FROM to the stack.
1425 
1426    ALIGN is maximum stack alignment we can assume.
1427 
1428    Return value is based on RETMODE argument.  */
1429 
1430 rtx
1431 move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1432 		unsigned int align, memop_ret retmode)
1433 {
1434 #ifndef PUSH_ROUNDING
1435   if (to == NULL)
1436     gcc_unreachable ();
1437 #endif
1438 
1439   move_by_pieces_d data (to, from, len, align);
1440 
1441   data.run ();
1442 
1443   if (retmode != RETURN_BEGIN)
1444     return data.finish_retmode (retmode);
1445   else
1446     return to;
1447 }
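/* Illustrative usage (editorial): an expansion that needs to know where
   the copy ended can request it through RETMODE, e.g.

     rtx end = move_by_pieces (dst, src, len, align, RETURN_END);

   where END is DST adjusted to just past the last byte written;
   RETURN_END_MINUS_ONE points at the last byte instead, and
   RETURN_BEGIN simply returns TO unchanged.  */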
1448 
1449 /* Derived class from op_by_pieces_d, providing support for memory store
1450    operations.  */
1451 
1452 class store_by_pieces_d : public op_by_pieces_d
1453 {
1454   insn_gen_fn m_gen_fun;
1455   void generate (rtx, rtx, machine_mode);
1456   bool prepare_mode (machine_mode, unsigned int);
1457 
1458  public:
1459   store_by_pieces_d (rtx to, by_pieces_constfn cfn, void *cfn_data,
1460 		     unsigned HOST_WIDE_INT len, unsigned int align,
1461 		     bool qi_vector_mode)
1462     : op_by_pieces_d (STORE_MAX_PIECES, to, false, NULL_RTX, true, cfn,
1463 		      cfn_data, len, align, false, qi_vector_mode)
1464   {
1465   }
1466   rtx finish_retmode (memop_ret);
1467 };
1468 
1469 /* Return true if MODE can be used for a set of stores, given an
1470    alignment ALIGN.  Prepare whatever data is necessary for later
1471    calls to generate.  */
1472 
1473 bool
1474 store_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1475 {
1476   insn_code icode = optab_handler (mov_optab, mode);
1477   m_gen_fun = GEN_FCN (icode);
1478   return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1479 }
1480 
1481 /* A callback used when iterating for a store_by_pieces operation.
1482    OP0 is the destination piece and OP1 is the value to store into it
1483    in MODE.  The store is emitted with the insn gen function selected
1484    earlier by prepare_mode; unlike the move case, there is no stack
1485    push variant here.  */
1486 
1487 void
1488 store_by_pieces_d::generate (rtx op0, rtx op1, machine_mode)
1489 {
1490   emit_insn (m_gen_fun (op0, op1));
1491 }
1492 
1493 /* Perform the final adjustment at the end of a string to obtain the
1494    correct return value for the block operation.
1495    Return value is based on RETMODE argument.  */
1496 
1497 rtx
1498 store_by_pieces_d::finish_retmode (memop_ret retmode)
1499 {
1500   gcc_assert (!m_reverse);
1501   if (retmode == RETURN_END_MINUS_ONE)
1502     {
1503       m_to.maybe_postinc (-1);
1504       --m_offset;
1505     }
1506   return m_to.adjust (QImode, m_offset);
1507 }
1508 
1509 /* Determine whether the LEN bytes generated by CONSTFUN can be
1510    stored to memory using several move instructions.  CONSTFUNDATA is
1511    a pointer which will be passed as argument in every CONSTFUN call.
1512    ALIGN is maximum alignment we can assume.  MEMSETP is true if this is
1513    a memset operation and false if it's a copy of a constant string.
1514    Return nonzero if a call to store_by_pieces should succeed.  */
1515 
1516 int
1517 can_store_by_pieces (unsigned HOST_WIDE_INT len,
1518 		     by_pieces_constfn constfun,
1519 		     void *constfundata, unsigned int align, bool memsetp)
1520 {
1521   unsigned HOST_WIDE_INT l;
1522   unsigned int max_size;
1523   HOST_WIDE_INT offset = 0;
1524   enum insn_code icode;
1525   int reverse;
1526   /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it.  */
1527   rtx cst ATTRIBUTE_UNUSED;
1528 
1529   if (len == 0)
1530     return 1;
1531 
1532   if (!targetm.use_by_pieces_infrastructure_p (len, align,
1533 					       memsetp
1534 						 ? SET_BY_PIECES
1535 						 : STORE_BY_PIECES,
1536 					       optimize_insn_for_speed_p ()))
1537     return 0;
1538 
1539   align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
1540 
1541   /* We would first store what we can in the largest integer mode, then go to
1542      successively smaller modes.  */
1543 
1544   for (reverse = 0;
1545        reverse <= (HAVE_PRE_DECREMENT || HAVE_POST_DECREMENT);
1546        reverse++)
1547     {
1548       l = len;
1549       max_size = STORE_MAX_PIECES + 1;
1550       while (max_size > 1 && l > 0)
1551 	{
1552 	  fixed_size_mode mode
1553 	    = widest_fixed_size_mode_for_size (max_size, memsetp);
1554 
1555 	  icode = optab_handler (mov_optab, mode);
1556 	  if (icode != CODE_FOR_nothing
1557 	      && align >= GET_MODE_ALIGNMENT (mode))
1558 	    {
1559 	      unsigned int size = GET_MODE_SIZE (mode);
1560 
1561 	      while (l >= size)
1562 		{
1563 		  if (reverse)
1564 		    offset -= size;
1565 
1566 		  cst = (*constfun) (constfundata, nullptr, offset, mode);
1567 		  /* All CONST_VECTORs can be loaded for memset since
1568 		     vec_duplicate_optab is a precondition to pick a
1569 		     vector mode for the memset expander.  */
1570 		  if (!((memsetp && VECTOR_MODE_P (mode))
1571 			|| targetm.legitimate_constant_p (mode, cst)))
1572 		    return 0;
1573 
1574 		  if (!reverse)
1575 		    offset += size;
1576 
1577 		  l -= size;
1578 		}
1579 	    }
1580 
1581 	  max_size = GET_MODE_SIZE (mode);
1582 	}
1583 
1584       /* The code above should have handled everything.  */
1585       gcc_assert (!l);
1586     }
1587 
1588   return 1;
1589 }
1590 
1591 /* Generate several move instructions to store LEN bytes generated by
1592    CONSTFUN to block TO.  (A MEM rtx with BLKmode).  CONSTFUNDATA is a
1593    pointer which will be passed as argument in every CONSTFUN call.
1594    ALIGN is maximum alignment we can assume.  MEMSETP is true if this is
1595    a memset operation and false if it's a copy of a constant string.
1596    Return value is based on RETMODE argument.  */
1597 
1598 rtx
1599 store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
1600 		 by_pieces_constfn constfun,
1601 		 void *constfundata, unsigned int align, bool memsetp,
1602 		 memop_ret retmode)
1603 {
1604   if (len == 0)
1605     {
1606       gcc_assert (retmode != RETURN_END_MINUS_ONE);
1607       return to;
1608     }
1609 
1610   gcc_assert (targetm.use_by_pieces_infrastructure_p
1611 		(len, align,
1612 		 memsetp ? SET_BY_PIECES : STORE_BY_PIECES,
1613 		 optimize_insn_for_speed_p ()));
1614 
1615   store_by_pieces_d data (to, constfun, constfundata, len, align,
1616 			  memsetp);
1617   data.run ();
1618 
1619   if (retmode != RETURN_BEGIN)
1620     return data.finish_retmode (retmode);
1621   else
1622     return to;
1623 }
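
/* Illustrative sketch (hypothetical caller, not from the original file;
   DST, LEN and ALIGN stand for a BLKmode MEM, a byte count and a bit
   alignment): a memset-style expansion would typically be guarded by
   can_store_by_pieces and then handed to store_by_pieces:

     char c = 0;
     if (can_store_by_pieces (len, builtin_memset_read_str, &c, align, true))
       store_by_pieces (dst, len, builtin_memset_read_str, &c, align,
                        true, RETURN_BEGIN);

   clear_by_pieces below drives the same machinery directly for callers
   that have already settled on the by-pieces strategy.  */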
1624 
1625 /* Generate several move instructions to clear LEN bytes of block TO.  (A MEM
1626    rtx with BLKmode).  ALIGN is maximum alignment we can assume.  */
1627 
1628 static void
1629 clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align)
1630 {
1631   if (len == 0)
1632     return;
1633 
1634   /* Use builtin_memset_read_str to support vector mode broadcast.  */
1635   char c = 0;
1636   store_by_pieces_d data (to, builtin_memset_read_str, &c, len, align,
1637 			  true);
1638   data.run ();
1639 }
1640 
1641 /* Context used by compare_by_pieces_genfn.  It stores the fail label
1642    to jump to in case of miscomparison, and for branch ratios greater than 1,
1643    it stores an accumulator and the current and maximum counts before
1644    emitting another branch.  */
1645 
1646 class compare_by_pieces_d : public op_by_pieces_d
1647 {
1648   rtx_code_label *m_fail_label;
1649   rtx m_accumulator;
1650   int m_count, m_batch;
1651 
1652   void generate (rtx, rtx, machine_mode);
1653   bool prepare_mode (machine_mode, unsigned int);
1654   void finish_mode (machine_mode);
1655  public:
1656   compare_by_pieces_d (rtx op0, rtx op1, by_pieces_constfn op1_cfn,
1657 		       void *op1_cfn_data, HOST_WIDE_INT len, int align,
1658 		       rtx_code_label *fail_label)
1659     : op_by_pieces_d (COMPARE_MAX_PIECES, op0, true, op1, true, op1_cfn,
1660 		      op1_cfn_data, len, align, false)
1661   {
1662     m_fail_label = fail_label;
1663   }
1664 };
1665 
1666 /* A callback used when iterating for a compare_by_pieces_operation.
1667    OP0 and OP1 are the values that have been loaded and should be
1668    compared in MODE.  DATA holds a pointer to the compare_by_pieces_data
1669    context structure.  */
1670 
1671 void
1672 compare_by_pieces_d::generate (rtx op0, rtx op1, machine_mode mode)
1673 {
1674   if (m_batch > 1)
1675     {
1676       rtx temp = expand_binop (mode, sub_optab, op0, op1, NULL_RTX,
1677 			       true, OPTAB_LIB_WIDEN);
1678       if (m_count != 0)
1679 	temp = expand_binop (mode, ior_optab, m_accumulator, temp, temp,
1680 			     true, OPTAB_LIB_WIDEN);
1681       m_accumulator = temp;
1682 
1683       if (++m_count < m_batch)
1684 	return;
1685 
1686       m_count = 0;
1687       op0 = m_accumulator;
1688       op1 = const0_rtx;
1689       m_accumulator = NULL_RTX;
1690     }
1691   do_compare_rtx_and_jump (op0, op1, NE, true, mode, NULL_RTX, NULL,
1692 			   m_fail_label, profile_probability::uninitialized ());
1693 }
1694 
1695 /* Return true if MODE can be used for a set of moves and comparisons,
1696    given an alignment ALIGN.  Prepare whatever data is necessary for
1697    later calls to generate.  */
1698 
1699 bool
1700 compare_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1701 {
1702   insn_code icode = optab_handler (mov_optab, mode);
1703   if (icode == CODE_FOR_nothing
1704       || align < GET_MODE_ALIGNMENT (mode)
1705       || !can_compare_p (EQ, mode, ccp_jump))
1706     return false;
1707   m_batch = targetm.compare_by_pieces_branch_ratio (mode);
1708   if (m_batch < 0)
1709     return false;
1710   m_accumulator = NULL_RTX;
1711   m_count = 0;
1712   return true;
1713 }
1714 
1715 /* Called after expanding a series of comparisons in MODE.  If we have
1716    accumulated results for which we haven't emitted a branch yet, do
1717    so now.  */
1718 
1719 void
1720 compare_by_pieces_d::finish_mode (machine_mode mode)
1721 {
1722   if (m_accumulator != NULL_RTX)
1723     do_compare_rtx_and_jump (m_accumulator, const0_rtx, NE, true, mode,
1724 			     NULL_RTX, NULL, m_fail_label,
1725 			     profile_probability::uninitialized ());
1726 }
1727 
1728 /* Generate several move instructions to compare LEN bytes from blocks
1729    ARG0 and ARG1.  (These are MEM rtx's with BLKmode).
1730 
1731    The result (0 if the blocks compare equal, 1 otherwise) is returned in
1732    TARGET if that is a suitable pseudo register, else in a fresh pseudo.
1733 
1734    ALIGN is the maximum common alignment we can assume.
1735 
1736    Optionally, the caller can pass a constfn and associated data in A1_CFN
1737    and A1_CFN_DATA, describing that the second operand being compared is a
1738    known constant and how to obtain its data.  */
1739 
1740 static rtx
1741 compare_by_pieces (rtx arg0, rtx arg1, unsigned HOST_WIDE_INT len,
1742 		   rtx target, unsigned int align,
1743 		   by_pieces_constfn a1_cfn, void *a1_cfn_data)
1744 {
1745   rtx_code_label *fail_label = gen_label_rtx ();
1746   rtx_code_label *end_label = gen_label_rtx ();
1747 
1748   if (target == NULL_RTX
1749       || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
1750     target = gen_reg_rtx (TYPE_MODE (integer_type_node));
1751 
1752   compare_by_pieces_d data (arg0, arg1, a1_cfn, a1_cfn_data, len, align,
1753 			    fail_label);
1754 
1755   data.run ();
1756 
1757   emit_move_insn (target, const0_rtx);
1758   emit_jump (end_label);
1759   emit_barrier ();
1760   emit_label (fail_label);
1761   emit_move_insn (target, const1_rtx);
1762   emit_label (end_label);
1763 
1764   return target;
1765 }
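
/* Schematic view of the code emitted above (not literal RTL):

     <piecewise loads, SUB/IOR accumulation, conditional jumps to fail_label>
       target = 0
       goto end_label
     fail_label:
       target = 1
     end_label:

   so TARGET holds 0 when the blocks are equal and 1 as soon as a
   difference is detected.  */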
1766 
1767 /* Emit code to move a block Y to a block X.  This may be done with
1768    string-move instructions, with multiple scalar move instructions,
1769    or with a library call.
1770 
1771    Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
1772    SIZE is an rtx that says how long they are.
1773    ALIGN is the maximum alignment we can assume they have.
1774    METHOD describes what kind of copy this is, and what mechanisms may be used.
1775    MIN_SIZE is the minimal size of the block to move.
1776    MAX_SIZE is the maximal size of the block to move; if it cannot be
1777    represented in unsigned HOST_WIDE_INT, then it is a mask of all ones.
1778 
1779    Return the address of the new block, if memcpy is called and returns it,
1780    0 otherwise.  */
1781 
1782 rtx
1783 emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
1784 		       unsigned int expected_align, HOST_WIDE_INT expected_size,
1785 		       unsigned HOST_WIDE_INT min_size,
1786 		       unsigned HOST_WIDE_INT max_size,
1787 		       unsigned HOST_WIDE_INT probable_max_size,
1788 		       bool bail_out_libcall, bool *is_move_done,
1789 		       bool might_overlap)
1790 {
1791   int may_use_call;
1792   rtx retval = 0;
1793   unsigned int align;
1794 
1795   if (is_move_done)
1796     *is_move_done = true;
1797 
1798   gcc_assert (size);
1799   if (CONST_INT_P (size) && INTVAL (size) == 0)
1800     return 0;
1801 
1802   switch (method)
1803     {
1804     case BLOCK_OP_NORMAL:
1805     case BLOCK_OP_TAILCALL:
1806       may_use_call = 1;
1807       break;
1808 
1809     case BLOCK_OP_CALL_PARM:
1810       may_use_call = block_move_libcall_safe_for_call_parm ();
1811 
1812       /* Make inhibit_defer_pop nonzero around the library call
1813 	 to force it to pop the arguments right away.  */
1814       NO_DEFER_POP;
1815       break;
1816 
1817     case BLOCK_OP_NO_LIBCALL:
1818       may_use_call = 0;
1819       break;
1820 
1821     case BLOCK_OP_NO_LIBCALL_RET:
1822       may_use_call = -1;
1823       break;
1824 
1825     default:
1826       gcc_unreachable ();
1827     }
1828 
1829   gcc_assert (MEM_P (x) && MEM_P (y));
1830   align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1831   gcc_assert (align >= BITS_PER_UNIT);
1832 
1833   /* Make sure we've got BLKmode addresses; store_one_arg can decide that
1834      block copy is more efficient for other large modes, e.g. DCmode.  */
1835   x = adjust_address (x, BLKmode, 0);
1836   y = adjust_address (y, BLKmode, 0);
1837 
1838   /* If source and destination are the same, no need to copy anything.  */
1839   if (rtx_equal_p (x, y)
1840       && !MEM_VOLATILE_P (x)
1841       && !MEM_VOLATILE_P (y))
1842     return 0;
1843 
1844   /* Set MEM_SIZE as appropriate for this block copy.  The main place this
1845      can be incorrect is coming from __builtin_memcpy.  */
1846   poly_int64 const_size;
1847   if (poly_int_rtx_p (size, &const_size))
1848     {
1849       x = shallow_copy_rtx (x);
1850       y = shallow_copy_rtx (y);
1851       set_mem_size (x, const_size);
1852       set_mem_size (y, const_size);
1853     }
1854 
1855   bool pieces_ok = CONST_INT_P (size)
1856     && can_move_by_pieces (INTVAL (size), align);
1857   bool pattern_ok = false;
1858 
1859   if (!pieces_ok || might_overlap)
1860     {
1861       pattern_ok
1862 	= emit_block_move_via_pattern (x, y, size, align,
1863 				       expected_align, expected_size,
1864 				       min_size, max_size, probable_max_size,
1865 				       might_overlap);
1866       if (!pattern_ok && might_overlap)
1867 	{
1868 	  /* Do not try any of the other methods below as they are not safe
1869 	     for overlapping moves.  */
1870 	  *is_move_done = false;
1871 	  return retval;
1872 	}
1873     }
1874 
1875   if (pattern_ok)
1876     ;
1877   else if (pieces_ok)
1878     move_by_pieces (x, y, INTVAL (size), align, RETURN_BEGIN);
1879   else if (may_use_call && !might_overlap
1880 	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
1881 	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
1882     {
1883       if (bail_out_libcall)
1884 	{
1885 	  if (is_move_done)
1886 	    *is_move_done = false;
1887 	  return retval;
1888 	}
1889 
1890       if (may_use_call < 0)
1891 	return pc_rtx;
1892 
1893       retval = emit_block_copy_via_libcall (x, y, size,
1894 					    method == BLOCK_OP_TAILCALL);
1895     }
1896   else if (might_overlap)
1897     *is_move_done = false;
1898   else
1899     emit_block_move_via_loop (x, y, size, align);
1900 
1901   if (method == BLOCK_OP_CALL_PARM)
1902     OK_DEFER_POP;
1903 
1904   return retval;
1905 }
1906 
1907 rtx
1908 emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
1909 {
1910   unsigned HOST_WIDE_INT max, min = 0;
1911   if (GET_CODE (size) == CONST_INT)
1912     min = max = UINTVAL (size);
1913   else
1914     max = GET_MODE_MASK (GET_MODE (size));
1915   return emit_block_move_hints (x, y, size, method, 0, -1,
1916 				min, max, max);
1917 }
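
/* Illustrative sketch (hypothetical caller, not from the original file;
   X, Y and NBYTES are placeholders): a plain structure copy with no
   extra knowledge is requested as

     emit_block_move (x, y, GEN_INT (nbytes), BLOCK_OP_NORMAL);

   while callers that know alignment or size bounds use
   emit_block_move_hints above directly.  */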
1918 
1919 /* A subroutine of emit_block_move.  Returns true if calling the
1920    block move libcall will not clobber any parameters which may have
1921    already been placed on the stack.  */
1922 
1923 static bool
1924 block_move_libcall_safe_for_call_parm (void)
1925 {
1926   tree fn;
1927 
1928   /* If arguments are pushed on the stack, then they're safe.  */
1929   if (targetm.calls.push_argument (0))
1930     return true;
1931 
1932   /* If registers go on the stack anyway, any argument is sure to clobber
1933      an outgoing argument.  */
1934 #if defined (REG_PARM_STACK_SPACE)
1935   fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1936   /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
1937      depend on its argument.  */
1938   (void) fn;
1939   if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn)))
1940       && REG_PARM_STACK_SPACE (fn) != 0)
1941     return false;
1942 #endif
1943 
1944   /* If any argument goes in memory, then it might clobber an outgoing
1945      argument.  */
1946   {
1947     CUMULATIVE_ARGS args_so_far_v;
1948     cumulative_args_t args_so_far;
1949     tree arg;
1950 
1951     fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1952     INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3);
1953     args_so_far = pack_cumulative_args (&args_so_far_v);
1954 
1955     arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
1956     for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
1957       {
1958 	machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
1959 	function_arg_info arg_info (mode, /*named=*/true);
1960 	rtx tmp = targetm.calls.function_arg (args_so_far, arg_info);
1961 	if (!tmp || !REG_P (tmp))
1962 	  return false;
1963 	if (targetm.calls.arg_partial_bytes (args_so_far, arg_info))
1964 	  return false;
1965 	targetm.calls.function_arg_advance (args_so_far, arg_info);
1966       }
1967   }
1968   return true;
1969 }
1970 
1971 /* A subroutine of emit_block_move.  Expand a cpymem or movmem pattern;
1972    return true if successful.
1973 
1974    X is the destination of the copy or move.
1975    Y is the source of the copy or move.
1976    SIZE is the size of the block to be moved.
1977 
1978    MIGHT_OVERLAP indicates this originated with expansion of a
1979    builtin_memmove() and the source and destination blocks may
1980    overlap.
1981   */
1982 
1983 static bool
1984 emit_block_move_via_pattern (rtx x, rtx y, rtx size, unsigned int align,
1985 			     unsigned int expected_align,
1986 			     HOST_WIDE_INT expected_size,
1987 			     unsigned HOST_WIDE_INT min_size,
1988 			     unsigned HOST_WIDE_INT max_size,
1989 			     unsigned HOST_WIDE_INT probable_max_size,
1990 			     bool might_overlap)
1991 {
1992   if (expected_align < align)
1993     expected_align = align;
1994   if (expected_size != -1)
1995     {
1996       if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size)
1997 	expected_size = probable_max_size;
1998       if ((unsigned HOST_WIDE_INT)expected_size < min_size)
1999 	expected_size = min_size;
2000     }
2001 
2002   /* Since this is a move insn, we don't care about volatility.  */
2003   temporary_volatile_ok v (true);
2004 
2005   /* Try the most limited insn first, because there's no point
2006      including more than one in the machine description unless
2007      the more limited one has some advantage.  */
2008 
2009   opt_scalar_int_mode mode_iter;
2010   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2011     {
2012       scalar_int_mode mode = mode_iter.require ();
2013       enum insn_code code;
2014       if (might_overlap)
2015 	code = direct_optab_handler (movmem_optab, mode);
2016       else
2017 	code = direct_optab_handler (cpymem_optab, mode);
2018 
2019       if (code != CODE_FOR_nothing
2020 	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
2021 	     here because if SIZE is less than the mode mask, as it is
2022 	     returned by the macro, it will definitely be less than the
2023 	     actual mode mask.  Since SIZE is within the Pmode address
2024 	     space, we limit MODE to Pmode.  */
2025 	  && ((CONST_INT_P (size)
2026 	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
2027 		   <= (GET_MODE_MASK (mode) >> 1)))
2028 	      || max_size <= (GET_MODE_MASK (mode) >> 1)
2029 	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
2030 	{
2031 	  class expand_operand ops[9];
2032 	  unsigned int nops;
2033 
2034 	  /* ??? When called via emit_block_move_for_call, it'd be
2035 	     nice if there were some way to inform the backend, so
2036 	     that it doesn't fail the expansion because it thinks
2037 	     emitting the libcall would be more efficient.  */
2038 	  nops = insn_data[(int) code].n_generator_args;
2039 	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
2040 
2041 	  create_fixed_operand (&ops[0], x);
2042 	  create_fixed_operand (&ops[1], y);
2043 	  /* The check above guarantees that this size conversion is valid.  */
2044 	  create_convert_operand_to (&ops[2], size, mode, true);
2045 	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
2046 	  if (nops >= 6)
2047 	    {
2048 	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
2049 	      create_integer_operand (&ops[5], expected_size);
2050 	    }
2051 	  if (nops >= 8)
2052 	    {
2053 	      create_integer_operand (&ops[6], min_size);
2054 	      /* If we cannot represent the maximal size,
2055 		 make parameter NULL.  */
2056 	      if ((HOST_WIDE_INT) max_size != -1)
2057 	        create_integer_operand (&ops[7], max_size);
2058 	      else
2059 		create_fixed_operand (&ops[7], NULL);
2060 	    }
2061 	  if (nops == 9)
2062 	    {
2063 	      /* If we cannot represent the maximal size,
2064 		 make parameter NULL.  */
2065 	      if ((HOST_WIDE_INT) probable_max_size != -1)
2066 	        create_integer_operand (&ops[8], probable_max_size);
2067 	      else
2068 		create_fixed_operand (&ops[8], NULL);
2069 	    }
2070 	  if (maybe_expand_insn (code, nops, ops))
2071 	    return true;
2072 	}
2073     }
2074 
2075   return false;
2076 }
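
/* Summary of the operand layout passed to the cpymem/movmem expanders
   above (descriptive only, derived from the create_*_operand calls):

     operands[0]  destination MEM      operands[1]  source MEM
     operands[2]  byte count           operands[3]  alignment in bytes
     operands[4]  expected alignment   operands[5]  expected size
     operands[6]  minimum size         operands[7]  maximum size or NULL
     operands[8]  probable maximum size or NULL

   Only the first 4, 6, 8 or 9 operands are present, depending on the
   pattern's n_generator_args.  */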
2077 
2078 /* A subroutine of emit_block_move.  Copy the data via an explicit
2079    loop.  This is used only when libcalls are forbidden.  */
2080 /* ??? It'd be nice to copy in hunks larger than QImode.  */
2081 
2082 static void
2083 emit_block_move_via_loop (rtx x, rtx y, rtx size,
2084 			  unsigned int align ATTRIBUTE_UNUSED)
2085 {
2086   rtx_code_label *cmp_label, *top_label;
2087   rtx iter, x_addr, y_addr, tmp;
2088   machine_mode x_addr_mode = get_address_mode (x);
2089   machine_mode y_addr_mode = get_address_mode (y);
2090   machine_mode iter_mode;
2091 
2092   iter_mode = GET_MODE (size);
2093   if (iter_mode == VOIDmode)
2094     iter_mode = word_mode;
2095 
2096   top_label = gen_label_rtx ();
2097   cmp_label = gen_label_rtx ();
2098   iter = gen_reg_rtx (iter_mode);
2099 
2100   emit_move_insn (iter, const0_rtx);
2101 
2102   x_addr = force_operand (XEXP (x, 0), NULL_RTX);
2103   y_addr = force_operand (XEXP (y, 0), NULL_RTX);
2104   do_pending_stack_adjust ();
2105 
2106   emit_jump (cmp_label);
2107   emit_label (top_label);
2108 
2109   tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
2110   x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);
2111 
2112   if (x_addr_mode != y_addr_mode)
2113     tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
2114   y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
2115 
2116   x = change_address (x, QImode, x_addr);
2117   y = change_address (y, QImode, y_addr);
2118 
2119   emit_move_insn (x, y);
2120 
2121   tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter,
2122 			     true, OPTAB_LIB_WIDEN);
2123   if (tmp != iter)
2124     emit_move_insn (iter, tmp);
2125 
2126   emit_label (cmp_label);
2127 
2128   emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
2129 			   true, top_label,
2130 			   profile_probability::guessed_always ()
2131 				.apply_scale (9, 10));
2132 }
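
/* Rough C equivalent of the loop emitted above (illustrative only):

     for (iter = 0; iter < size; iter++)
       ((unsigned char *) x)[iter] = ((unsigned char *) y)[iter];

   i.e. a byte-at-a-time copy, which is why this is only the fallback
   when neither a pattern nor a libcall can be used.  */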
2133 
2134 /* Expand a call to memcpy or memmove or memcmp, and return the result.
2135    TAILCALL is true if this is a tail call.  */
2136 
2137 rtx
2138 emit_block_op_via_libcall (enum built_in_function fncode, rtx dst, rtx src,
2139 			   rtx size, bool tailcall)
2140 {
2141   rtx dst_addr, src_addr;
2142   tree call_expr, dst_tree, src_tree, size_tree;
2143   machine_mode size_mode;
2144 
2145   /* Since dst and src are passed to a libcall, mark the corresponding
2146      tree EXPR as addressable.  */
2147   tree dst_expr = MEM_EXPR (dst);
2148   tree src_expr = MEM_EXPR (src);
2149   if (dst_expr)
2150     mark_addressable (dst_expr);
2151   if (src_expr)
2152     mark_addressable (src_expr);
2153 
2154   dst_addr = copy_addr_to_reg (XEXP (dst, 0));
2155   dst_addr = convert_memory_address (ptr_mode, dst_addr);
2156   dst_tree = make_tree (ptr_type_node, dst_addr);
2157 
2158   src_addr = copy_addr_to_reg (XEXP (src, 0));
2159   src_addr = convert_memory_address (ptr_mode, src_addr);
2160   src_tree = make_tree (ptr_type_node, src_addr);
2161 
2162   size_mode = TYPE_MODE (sizetype);
2163   size = convert_to_mode (size_mode, size, 1);
2164   size = copy_to_mode_reg (size_mode, size);
2165   size_tree = make_tree (sizetype, size);
2166 
2167   /* It is incorrect to use the libcall calling conventions for calls to
2168      memcpy/memmove/memcmp because they can be provided by the user.  */
2169   tree fn = builtin_decl_implicit (fncode);
2170   call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree);
2171   CALL_EXPR_TAILCALL (call_expr) = tailcall;
2172 
2173   return expand_call (call_expr, NULL_RTX, false);
2174 }
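
/* Illustrative sketch (hypothetical caller, not from the original file;
   DST, SRC and SIZE are placeholder rtxes): a memcpy expansion through
   this helper looks like

     rtx ret = emit_block_op_via_libcall (BUILT_IN_MEMCPY, dst, src,
                                          size, false);

   where the final argument is TAILCALL; the memmove and memcmp cases
   differ only in FNCODE.  */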
2175 
2176 /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
2177    ARG3_TYPE is the type of ARG3_RTX.  Return the result rtx on success,
2178    otherwise return null.  */
2179 
2180 rtx
2181 expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx,
2182 			  rtx arg2_rtx, tree arg3_type, rtx arg3_rtx,
2183 			  HOST_WIDE_INT align)
2184 {
2185   machine_mode insn_mode = insn_data[icode].operand[0].mode;
2186 
2187   if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
2188     target = NULL_RTX;
2189 
2190   class expand_operand ops[5];
2191   create_output_operand (&ops[0], target, insn_mode);
2192   create_fixed_operand (&ops[1], arg1_rtx);
2193   create_fixed_operand (&ops[2], arg2_rtx);
2194   create_convert_operand_from (&ops[3], arg3_rtx, TYPE_MODE (arg3_type),
2195 			       TYPE_UNSIGNED (arg3_type));
2196   create_integer_operand (&ops[4], align);
2197   if (maybe_expand_insn (icode, 5, ops))
2198     return ops[0].value;
2199   return NULL_RTX;
2200 }
2201 
2202 /* Expand a block compare between X and Y with length LEN using the
2203    cmpmem optab, placing the result in TARGET.  LEN_TYPE is the type
2204    of the expression that was used to calculate the length.  ALIGN
2205    gives the known minimum common alignment.  */
2206 
2207 static rtx
2208 emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
2209 			   unsigned align)
2210 {
2211   /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
2212      implementing memcmp because it will stop if it encounters two
2213      zero bytes.  */
2214   insn_code icode = direct_optab_handler (cmpmem_optab, SImode);
2215 
2216   if (icode == CODE_FOR_nothing)
2217     return NULL_RTX;
2218 
2219   return expand_cmpstrn_or_cmpmem (icode, target, x, y, len_type, len, align);
2220 }
2221 
2222 /* Emit code to compare a block Y to a block X.  This may be done with
2223    string-compare instructions, with multiple scalar instructions,
2224    or with a library call.
2225 
2226    Both X and Y must be MEM rtx's.  LEN is an rtx that says how long
2227    they are.  LEN_TYPE is the type of the expression that was used to
2228    calculate it.
2229 
2230    If EQUALITY_ONLY is true, it means we don't have to return the tri-state
2231    value of a normal memcmp call, instead we can just compare for equality.
2232    If FORCE_LIBCALL is true, we should emit a call to memcmp rather than
2233    returning NULL_RTX.
2234 
2235    Optionally, the caller can pass a constfn and associated data in Y_CFN
2236    and Y_CFN_DATA. describing that the second operand being compared is a
2237    and Y_CFN_DATA, describing that the second operand being compared is a
2238    Return the result of the comparison, or NULL_RTX if we failed to
2239    perform the operation.  */
2240 
2241 rtx
2242 emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
2243 		      bool equality_only, by_pieces_constfn y_cfn,
2244 		      void *y_cfndata)
2245 {
2246   rtx result = 0;
2247 
2248   if (CONST_INT_P (len) && INTVAL (len) == 0)
2249     return const0_rtx;
2250 
2251   gcc_assert (MEM_P (x) && MEM_P (y));
2252   unsigned int align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
2253   gcc_assert (align >= BITS_PER_UNIT);
2254 
2255   x = adjust_address (x, BLKmode, 0);
2256   y = adjust_address (y, BLKmode, 0);
2257 
2258   if (equality_only
2259       && CONST_INT_P (len)
2260       && can_do_by_pieces (INTVAL (len), align, COMPARE_BY_PIECES))
2261     result = compare_by_pieces (x, y, INTVAL (len), target, align,
2262 				y_cfn, y_cfndata);
2263   else
2264     result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align);
2265 
2266   return result;
2267 }
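
/* Illustrative sketch (hypothetical caller, not from the original file;
   X, Y and N are placeholders): an equality-only comparison with no
   known-constant operand could be requested as

     rtx res = emit_block_cmp_hints (x, y, GEN_INT (n), size_type_node,
                                     NULL_RTX, true, NULL, NULL);

   where RES is zero iff the blocks are equal, or NULL_RTX if neither
   compare_by_pieces nor the cmpmem pattern could handle the request.  */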
2268 
2269 /* Copy all or part of a value X into registers starting at REGNO.
2270    The number of registers to be filled is NREGS.  */
2271 
2272 void
2273 move_block_to_reg (int regno, rtx x, int nregs, machine_mode mode)
2274 {
2275   if (nregs == 0)
2276     return;
2277 
2278   if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
2279     x = validize_mem (force_const_mem (mode, x));
2280 
2281   /* See if the machine can do this with a load multiple insn.  */
2282   if (targetm.have_load_multiple ())
2283     {
2284       rtx_insn *last = get_last_insn ();
2285       rtx first = gen_rtx_REG (word_mode, regno);
2286       if (rtx_insn *pat = targetm.gen_load_multiple (first, x,
2287 						     GEN_INT (nregs)))
2288 	{
2289 	  emit_insn (pat);
2290 	  return;
2291 	}
2292       else
2293 	delete_insns_since (last);
2294     }
2295 
2296   for (int i = 0; i < nregs; i++)
2297     emit_move_insn (gen_rtx_REG (word_mode, regno + i),
2298 		    operand_subword_force (x, i, mode));
2299 }
2300 
2301 /* Copy all or part of a BLKmode value X out of registers starting at REGNO.
2302    The number of registers to be filled is NREGS.  */
2303 
2304 void
2305 move_block_from_reg (int regno, rtx x, int nregs)
2306 {
2307   if (nregs == 0)
2308     return;
2309 
2310   /* See if the machine can do this with a store multiple insn.  */
2311   if (targetm.have_store_multiple ())
2312     {
2313       rtx_insn *last = get_last_insn ();
2314       rtx first = gen_rtx_REG (word_mode, regno);
2315       if (rtx_insn *pat = targetm.gen_store_multiple (x, first,
2316 						      GEN_INT (nregs)))
2317 	{
2318 	  emit_insn (pat);
2319 	  return;
2320 	}
2321       else
2322 	delete_insns_since (last);
2323     }
2324 
2325   for (int i = 0; i < nregs; i++)
2326     {
2327       rtx tem = operand_subword (x, i, 1, BLKmode);
2328 
2329       gcc_assert (tem);
2330 
2331       emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
2332     }
2333 }
2334 
2335 /* Generate a PARALLEL rtx for a new non-consecutive group of registers from
2336    ORIG, where ORIG is a non-consecutive group of registers represented by
2337    a PARALLEL.  The clone is identical to the original except in that the
2338    original set of registers is replaced by a new set of pseudo registers.
2339    The new set has the same modes as the original set.  */
2340 
2341 rtx
2342 gen_group_rtx (rtx orig)
2343 {
2344   int i, length;
2345   rtx *tmps;
2346 
2347   gcc_assert (GET_CODE (orig) == PARALLEL);
2348 
2349   length = XVECLEN (orig, 0);
2350   tmps = XALLOCAVEC (rtx, length);
2351 
2352   /* Skip a NULL entry in first slot.  */
2353   i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;
2354 
2355   if (i)
2356     tmps[0] = 0;
2357 
2358   for (; i < length; i++)
2359     {
2360       machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0));
2361       rtx offset = XEXP (XVECEXP (orig, 0, i), 1);
2362 
2363       tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset);
2364     }
2365 
2366   return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
2367 }
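
/* Schematic example (register numbers are arbitrary, not from the
   original file): given a return-value group such as

     (parallel [(expr_list (reg:DI 3) (const_int 0))
                (expr_list (reg:DF 33) (const_int 8))])

   gen_group_rtx returns a PARALLEL of the same shape in which each
   register is replaced by a fresh pseudo of the same mode, while the
   byte offsets in the second operand of each EXPR_LIST are kept.  */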
2368 
2369 /* A subroutine of emit_group_load.  Arguments as for emit_group_load,
2370    except that values are placed in TMPS[i], and must later be moved
2371    into corresponding XEXP (XVECEXP (DST, 0, i), 0) element.  */
2372 
2373 static void
2374 emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
2375 		   poly_int64 ssize)
2376 {
2377   rtx src;
2378   int start, i;
2379   machine_mode m = GET_MODE (orig_src);
2380 
2381   gcc_assert (GET_CODE (dst) == PARALLEL);
2382 
2383   if (m != VOIDmode
2384       && !SCALAR_INT_MODE_P (m)
2385       && !MEM_P (orig_src)
2386       && GET_CODE (orig_src) != CONCAT)
2387     {
2388       scalar_int_mode imode;
2389       if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
2390 	{
2391 	  src = gen_reg_rtx (imode);
2392 	  emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
2393 	}
2394       else
2395 	{
2396 	  src = assign_stack_temp (GET_MODE (orig_src), ssize);
2397 	  emit_move_insn (src, orig_src);
2398 	}
2399       emit_group_load_1 (tmps, dst, src, type, ssize);
2400       return;
2401     }
2402 
2403   /* Check for a NULL entry, used to indicate that the parameter goes
2404      both on the stack and in registers.  */
2405   if (XEXP (XVECEXP (dst, 0, 0), 0))
2406     start = 0;
2407   else
2408     start = 1;
2409 
2410   /* Process the pieces.  */
2411   for (i = start; i < XVECLEN (dst, 0); i++)
2412     {
2413       machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
2414       poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1));
2415       poly_int64 bytelen = GET_MODE_SIZE (mode);
2416       poly_int64 shift = 0;
2417 
2418       /* Handle trailing fragments that run over the size of the struct.
2419 	 It's the target's responsibility to make sure that the fragment
2420 	 cannot be strictly smaller in some cases and strictly larger
2421 	 in others.  */
2422       gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2423       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2424 	{
2425 	  /* Arrange to shift the fragment to where it belongs.
2426 	     extract_bit_field loads to the lsb of the reg.  */
2427 	  if (
2428 #ifdef BLOCK_REG_PADDING
2429 	      BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
2430 	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2431 #else
2432 	      BYTES_BIG_ENDIAN
2433 #endif
2434 	      )
2435 	    shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2436 	  bytelen = ssize - bytepos;
2437 	  gcc_assert (maybe_gt (bytelen, 0));
2438 	}
2439 
2440       /* If we won't be loading directly from memory, protect the real source
2441 	 from strange tricks we might play; but make sure that the source can
2442 	 be loaded directly into the destination.  */
2443       src = orig_src;
2444       if (!MEM_P (orig_src)
2445 	  && (!CONSTANT_P (orig_src)
2446 	      || (GET_MODE (orig_src) != mode
2447 		  && GET_MODE (orig_src) != VOIDmode)))
2448 	{
2449 	  if (GET_MODE (orig_src) == VOIDmode)
2450 	    src = gen_reg_rtx (mode);
2451 	  else
2452 	    src = gen_reg_rtx (GET_MODE (orig_src));
2453 
2454 	  emit_move_insn (src, orig_src);
2455 	}
2456 
2457       /* Optimize the access just a bit.  */
2458       if (MEM_P (src)
2459 	  && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
2460 	      || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
2461 	  && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
2462 	  && known_eq (bytelen, GET_MODE_SIZE (mode)))
2463 	{
2464 	  tmps[i] = gen_reg_rtx (mode);
2465 	  emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
2466 	}
2467       else if (COMPLEX_MODE_P (mode)
2468 	       && GET_MODE (src) == mode
2469 	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
2470 	/* Let emit_move_complex do the bulk of the work.  */
2471 	tmps[i] = src;
2472       else if (GET_CODE (src) == CONCAT)
2473 	{
2474 	  poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
2475 	  poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
2476 	  unsigned int elt;
2477 	  poly_int64 subpos;
2478 
2479 	  if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
2480 	      && known_le (subpos + bytelen, slen0))
2481 	    {
2482 	      /* The following assumes that the concatenated objects all
2483 		 have the same size.  In this case, a simple calculation
2484 		 can be used to determine the object and the bit field
2485 		 to be extracted.  */
2486 	      tmps[i] = XEXP (src, elt);
2487 	      if (maybe_ne (subpos, 0)
2488 		  || maybe_ne (subpos + bytelen, slen0)
2489 		  || (!CONSTANT_P (tmps[i])
2490 		      && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
2491 		tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
2492 					     subpos * BITS_PER_UNIT,
2493 					     1, NULL_RTX, mode, mode, false,
2494 					     NULL);
2495 	    }
2496 	  else
2497 	    {
2498 	      rtx mem;
2499 
2500 	      gcc_assert (known_eq (bytepos, 0));
2501 	      mem = assign_stack_temp (GET_MODE (src), slen);
2502 	      emit_move_insn (mem, src);
2503 	      tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
2504 					   0, 1, NULL_RTX, mode, mode, false,
2505 					   NULL);
2506 	    }
2507 	}
2508       else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
2509                && XVECLEN (dst, 0) > 1)
2510         tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
2511       else if (CONSTANT_P (src))
2512 	{
2513 	  if (known_eq (bytelen, ssize))
2514 	    tmps[i] = src;
2515 	  else
2516 	    {
2517 	      rtx first, second;
2518 
2519 	      /* TODO: const_wide_int can have sizes other than this...  */
2520 	      gcc_assert (known_eq (2 * bytelen, ssize));
2521 	      split_double (src, &first, &second);
2522 	      if (i)
2523 		tmps[i] = second;
2524 	      else
2525 		tmps[i] = first;
2526 	    }
2527 	}
2528       else if (REG_P (src) && GET_MODE (src) == mode)
2529 	tmps[i] = src;
2530       else
2531 	tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
2532 				     bytepos * BITS_PER_UNIT, 1, NULL_RTX,
2533 				     mode, mode, false, NULL);
2534 
2535       if (maybe_ne (shift, 0))
2536 	tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
2537 				shift, tmps[i], 0);
2538     }
2539 }
2540 
2541 /* Emit code to move a block SRC of type TYPE to a block DST,
2542    where DST is non-consecutive registers represented by a PARALLEL.
2543    SSIZE represents the total size of block ORIG_SRC in bytes, or -1
2544    if not known.  */
2545 
2546 void
2547 emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
2548 {
2549   rtx *tmps;
2550   int i;
2551 
2552   tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0));
2553   emit_group_load_1 (tmps, dst, src, type, ssize);
2554 
2555   /* Copy the extracted pieces into the proper (probable) hard regs.  */
2556   for (i = 0; i < XVECLEN (dst, 0); i++)
2557     {
2558       rtx d = XEXP (XVECEXP (dst, 0, i), 0);
2559       if (d == NULL)
2560 	continue;
2561       emit_move_insn (d, tmps[i]);
2562     }
2563 }
2564 
2565 /* Similar, but load SRC into new pseudos in a format that looks like
2566    PARALLEL.  This can later be fed to emit_group_move to get things
2567    in the right place.  */
2568 
2569 rtx
2570 emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
2571 {
2572   rtvec vec;
2573   int i;
2574 
2575   vec = rtvec_alloc (XVECLEN (parallel, 0));
2576   emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);
2577 
2578   /* Convert the vector to look just like the original PARALLEL, except
2579      with the computed values.  */
2580   for (i = 0; i < XVECLEN (parallel, 0); i++)
2581     {
2582       rtx e = XVECEXP (parallel, 0, i);
2583       rtx d = XEXP (e, 0);
2584 
2585       if (d)
2586 	{
2587 	  d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
2588 	  e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
2589 	}
2590       RTVEC_ELT (vec, i) = e;
2591     }
2592 
2593   return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
2594 }
2595 
2596 /* Emit code to move a block SRC to block DST, where SRC and DST are
2597    non-consecutive groups of registers, each represented by a PARALLEL.  */
2598 
2599 void
2600 emit_group_move (rtx dst, rtx src)
2601 {
2602   int i;
2603 
2604   gcc_assert (GET_CODE (src) == PARALLEL
2605 	      && GET_CODE (dst) == PARALLEL
2606 	      && XVECLEN (src, 0) == XVECLEN (dst, 0));
2607 
2608   /* Skip first entry if NULL.  */
2609   for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++)
2610     emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0),
2611 		    XEXP (XVECEXP (src, 0, i), 0));
2612 }
2613 
2614 /* Move a group of registers represented by a PARALLEL into pseudos.  */
2615 
2616 rtx
2617 emit_group_move_into_temps (rtx src)
2618 {
2619   rtvec vec = rtvec_alloc (XVECLEN (src, 0));
2620   int i;
2621 
2622   for (i = 0; i < XVECLEN (src, 0); i++)
2623     {
2624       rtx e = XVECEXP (src, 0, i);
2625       rtx d = XEXP (e, 0);
2626 
2627       if (d)
2628 	e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
2629       RTVEC_ELT (vec, i) = e;
2630     }
2631 
2632   return gen_rtx_PARALLEL (GET_MODE (src), vec);
2633 }
2634 
2635 /* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
2636    where SRC is non-consecutive registers represented by a PARALLEL.
2637    SSIZE represents the total size of block ORIG_DST, or -1 if not
2638    known.  */
2639 
2640 void
2641 emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
2642 		  poly_int64 ssize)
2643 {
2644   rtx *tmps, dst;
2645   int start, finish, i;
2646   machine_mode m = GET_MODE (orig_dst);
2647 
2648   gcc_assert (GET_CODE (src) == PARALLEL);
2649 
2650   if (!SCALAR_INT_MODE_P (m)
2651       && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
2652     {
2653       scalar_int_mode imode;
2654       if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
2655 	{
2656 	  dst = gen_reg_rtx (imode);
2657 	  emit_group_store (dst, src, type, ssize);
2658 	  dst = gen_lowpart (GET_MODE (orig_dst), dst);
2659 	}
2660       else
2661 	{
2662 	  dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
2663 	  emit_group_store (dst, src, type, ssize);
2664 	}
2665       emit_move_insn (orig_dst, dst);
2666       return;
2667     }
2668 
2669   /* Check for a NULL entry, used to indicate that the parameter goes
2670      both on the stack and in registers.  */
2671   if (XEXP (XVECEXP (src, 0, 0), 0))
2672     start = 0;
2673   else
2674     start = 1;
2675   finish = XVECLEN (src, 0);
2676 
2677   tmps = XALLOCAVEC (rtx, finish);
2678 
2679   /* Copy the (probable) hard regs into pseudos.  */
2680   for (i = start; i < finish; i++)
2681     {
2682       rtx reg = XEXP (XVECEXP (src, 0, i), 0);
2683       if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2684 	{
2685 	  tmps[i] = gen_reg_rtx (GET_MODE (reg));
2686 	  emit_move_insn (tmps[i], reg);
2687 	}
2688       else
2689 	tmps[i] = reg;
2690     }
2691 
2692   /* If we won't be storing directly into memory, protect the real destination
2693      from strange tricks we might play.  */
2694   dst = orig_dst;
2695   if (GET_CODE (dst) == PARALLEL)
2696     {
2697       rtx temp;
2698 
2699       /* We can get a PARALLEL dst if there is a conditional expression in
2700 	 a return statement.  In that case, the dst and src are the same,
2701 	 so no action is necessary.  */
2702       if (rtx_equal_p (dst, src))
2703 	return;
2704 
2705       /* It is unclear if we can ever reach here, but we may as well handle
2706 	 it.  Allocate a temporary, and split this into a store/load to/from
2707 	 the temporary.  */
2708       temp = assign_stack_temp (GET_MODE (dst), ssize);
2709       emit_group_store (temp, src, type, ssize);
2710       emit_group_load (dst, temp, type, ssize);
2711       return;
2712     }
2713   else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
2714     {
2715       machine_mode outer = GET_MODE (dst);
2716       machine_mode inner;
2717       poly_int64 bytepos;
2718       bool done = false;
2719       rtx temp;
2720 
2721       if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER)
2722 	dst = gen_reg_rtx (outer);
2723 
2724       /* Make life a bit easier for combine.  */
2725       /* If the first element of the vector is the low part
2726 	 of the destination mode, use a paradoxical subreg to
2727 	 initialize the destination.  */
2728       if (start < finish)
2729 	{
2730 	  inner = GET_MODE (tmps[start]);
2731 	  bytepos = subreg_lowpart_offset (inner, outer);
2732 	  if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),
2733 			bytepos))
2734 	    {
2735 	      temp = simplify_gen_subreg (outer, tmps[start],
2736 					  inner, 0);
2737 	      if (temp)
2738 		{
2739 		  emit_move_insn (dst, temp);
2740 		  done = true;
2741 		  start++;
2742 		}
2743 	    }
2744 	}
2745 
2746       /* If the first element wasn't the low part, try the last.  */
2747       if (!done
2748 	  && start < finish - 1)
2749 	{
2750 	  inner = GET_MODE (tmps[finish - 1]);
2751 	  bytepos = subreg_lowpart_offset (inner, outer);
2752 	  if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,
2753 							  finish - 1), 1)),
2754 			bytepos))
2755 	    {
2756 	      temp = simplify_gen_subreg (outer, tmps[finish - 1],
2757 					  inner, 0);
2758 	      if (temp)
2759 		{
2760 		  emit_move_insn (dst, temp);
2761 		  done = true;
2762 		  finish--;
2763 		}
2764 	    }
2765 	}
2766 
2767       /* Otherwise, simply initialize the result to zero.  */
2768       if (!done)
2769         emit_move_insn (dst, CONST0_RTX (outer));
2770     }
2771 
2772   /* Process the pieces.  */
2773   for (i = start; i < finish; i++)
2774     {
2775       poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
2776       machine_mode mode = GET_MODE (tmps[i]);
2777       poly_int64 bytelen = GET_MODE_SIZE (mode);
2778       poly_uint64 adj_bytelen;
2779       rtx dest = dst;
2780 
2781       /* Handle trailing fragments that run over the size of the struct.
2782 	 It's the target's responsibility to make sure that the fragment
2783 	 cannot be strictly smaller in some cases and strictly larger
2784 	 in others.  */
2785       gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2786       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2787 	adj_bytelen = ssize - bytepos;
2788       else
2789 	adj_bytelen = bytelen;
2790 
2791       if (GET_CODE (dst) == CONCAT)
2792 	{
2793 	  if (known_le (bytepos + adj_bytelen,
2794 			GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2795 	    dest = XEXP (dst, 0);
2796 	  else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2797 	    {
2798 	      bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
2799 	      dest = XEXP (dst, 1);
2800 	    }
2801 	  else
2802 	    {
2803 	      machine_mode dest_mode = GET_MODE (dest);
2804 	      machine_mode tmp_mode = GET_MODE (tmps[i]);
2805 
2806 	      gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));
2807 
2808 	      if (GET_MODE_ALIGNMENT (dest_mode)
2809 		  >= GET_MODE_ALIGNMENT (tmp_mode))
2810 		{
2811 		  dest = assign_stack_temp (dest_mode,
2812 					    GET_MODE_SIZE (dest_mode));
2813 		  emit_move_insn (adjust_address (dest,
2814 						  tmp_mode,
2815 						  bytepos),
2816 				  tmps[i]);
2817 		  dst = dest;
2818 		}
2819 	      else
2820 		{
2821 		  dest = assign_stack_temp (tmp_mode,
2822 					    GET_MODE_SIZE (tmp_mode));
2823 		  emit_move_insn (dest, tmps[i]);
2824 		  dst = adjust_address (dest, dest_mode, bytepos);
2825 		}
2826 	      break;
2827 	    }
2828 	}
2829 
2830       /* Handle trailing fragments that run over the size of the struct.  */
2831       if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2832 	{
2833 	  /* store_bit_field always takes its value from the lsb.
2834 	     Move the fragment to the lsb if it's not already there.  */
2835 	  if (
2836 #ifdef BLOCK_REG_PADDING
2837 	      BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
2838 	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2839 #else
2840 	      BYTES_BIG_ENDIAN
2841 #endif
2842 	      )
2843 	    {
2844 	      poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2845 	      tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
2846 				      shift, tmps[i], 0);
2847 	    }
2848 
2849 	  /* Make sure not to write past the end of the struct.  */
2850 	  store_bit_field (dest,
2851 			   adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2852 			   bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT - 1,
2853 			   VOIDmode, tmps[i], false);
2854 	}
2855 
2856       /* Optimize the access just a bit.  */
2857       else if (MEM_P (dest)
2858 	       && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
2859 		   || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
2860 	       && multiple_p (bytepos * BITS_PER_UNIT,
2861 			      GET_MODE_ALIGNMENT (mode))
2862 	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
2863 	emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
2864 
2865       else
2866 	store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2867 			 0, 0, mode, tmps[i], false);
2868     }
2869 
2870   /* Copy from the pseudo into the (probable) hard reg.  */
2871   if (orig_dst != dst)
2872     emit_move_insn (orig_dst, dst);
2873 }
2874 
2875 /* Return a form of X that does not use a PARALLEL.  TYPE is the type
2876    of the value stored in X.  */
2877 
2878 rtx
2879 maybe_emit_group_store (rtx x, tree type)
2880 {
2881   machine_mode mode = TYPE_MODE (type);
2882   gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
2883   if (GET_CODE (x) == PARALLEL)
2884     {
2885       rtx result = gen_reg_rtx (mode);
2886       emit_group_store (result, x, type, int_size_in_bytes (type));
2887       return result;
2888     }
2889   return x;
2890 }
2891 
2892 /* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
2893 
2894    This is used on targets that return BLKmode values in registers.  */
2895 
2896 static void
2897 copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
2898 {
2899   unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
2900   rtx src = NULL, dst = NULL;
2901   unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
2902   unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
2903   /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
2904   fixed_size_mode mode = as_a <fixed_size_mode> (GET_MODE (srcreg));
2905   fixed_size_mode tmode = as_a <fixed_size_mode> (GET_MODE (target));
2906   fixed_size_mode copy_mode;
2907 
2908   /* BLKmode registers created in the back-end shouldn't have survived.  */
2909   gcc_assert (mode != BLKmode);
2910 
2911   /* If the structure doesn't take up a whole number of words, see whether
2912      SRCREG is padded on the left or on the right.  If it's on the left,
2913      set PADDING_CORRECTION to the number of bits to skip.
2914 
2915      In most ABIs, the structure will be returned at the least significant
2916      end of the register, which translates to right padding on little-endian
2917      targets and left padding on big-endian targets.  The opposite
2918      holds if the structure is returned at the most significant
2919      end of the register.  */
2920   if (bytes % UNITS_PER_WORD != 0
2921       && (targetm.calls.return_in_msb (type)
2922 	  ? !BYTES_BIG_ENDIAN
2923 	  : BYTES_BIG_ENDIAN))
2924     padding_correction
2925       = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
2926 
2927   /* We can use a single move if we have an exact mode for the size.  */
2928   else if (MEM_P (target)
2929 	   && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
2930 	       || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
2931 	   && bytes == GET_MODE_SIZE (mode))
2932   {
2933     emit_move_insn (adjust_address (target, mode, 0), srcreg);
2934     return;
2935   }
2936 
2937   /* And if we additionally have the same mode for a register.  */
2938   else if (REG_P (target)
2939 	   && GET_MODE (target) == mode
2940 	   && bytes == GET_MODE_SIZE (mode))
2941   {
2942     emit_move_insn (target, srcreg);
2943     return;
2944   }
2945 
2946   /* This code assumes srcreg is at least a full word.  If it isn't, copy it
2947      into a new pseudo which is a full word.  */
2948   if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
2949     {
2950       srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
2951       mode = word_mode;
2952     }
2953 
2954   /* Copy the structure BITSIZE bits at a time.  If the target lives in
2955      memory, take care of not reading/writing past its end by selecting
2956      a copy mode suited to BITSIZE.  This should always be possible given
2957      how it is computed.
2958 
2959      If the target lives in register, make sure not to select a copy mode
2960      larger than the mode of the register.
2961 
2962      We could probably emit more efficient code for machines which do not use
2963      strict alignment, but it doesn't seem worth the effort at the current
2964      time.  */
2965 
2966   copy_mode = word_mode;
2967   if (MEM_P (target))
2968     {
2969       opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
2970       if (mem_mode.exists ())
2971 	copy_mode = mem_mode.require ();
2972     }
2973   else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2974     copy_mode = tmode;
2975 
2976   for (bitpos = 0, xbitpos = padding_correction;
2977        bitpos < bytes * BITS_PER_UNIT;
2978        bitpos += bitsize, xbitpos += bitsize)
2979     {
2980       /* We need a new source operand each time xbitpos is on a
2981 	 word boundary and when xbitpos == padding_correction
2982 	 (the first time through).  */
2983       if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
2984 	src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);
2985 
2986       /* We need a new destination operand each time bitpos is on
2987 	 a word boundary.  */
2988       if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2989 	dst = target;
2990       else if (bitpos % BITS_PER_WORD == 0)
2991 	dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);
2992 
2993       /* Use xbitpos for the source extraction (right justified) and
2994 	 bitpos for the destination store (left justified).  */
2995       store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
2996 		       extract_bit_field (src, bitsize,
2997 					  xbitpos % BITS_PER_WORD, 1,
2998 					  NULL_RTX, copy_mode, copy_mode,
2999 					  false, NULL),
3000 		       false);
3001     }
3002 }
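
/* Worked example of the padding correction above (hypothetical target,
   not from the original file): with 64-bit words (UNITS_PER_WORD == 8)
   and a 6-byte structure that is padded on the left,

     padding_correction = 64 - (6 % 8) * 8 = 16

   so the first extraction skips the 16 pad bits of SRCREG and the copy
   then proceeds BITSIZE bits at a time.  */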
3003 
3004 /* Copy BLKmode value SRC into a register of mode MODE_IN.  Return the
3005    register if it contains any data, otherwise return null.
3006 
3007    This is used on targets that return BLKmode values in registers.  */
3008 
3009 rtx
3010 copy_blkmode_to_reg (machine_mode mode_in, tree src)
3011 {
3012   int i, n_regs;
3013   unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0, bytes;
3014   unsigned int bitsize;
3015   rtx *dst_words, dst, x, src_word = NULL_RTX, dst_word = NULL_RTX;
3016   /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
3017   fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
3018   fixed_size_mode dst_mode;
3019   scalar_int_mode min_mode;
3020 
3021   gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);
3022 
3023   x = expand_normal (src);
3024 
3025   bytes = arg_int_size_in_bytes (TREE_TYPE (src));
3026   if (bytes == 0)
3027     return NULL_RTX;
3028 
3029   /* If the structure doesn't take up a whole number of words, see
3030      whether the register value should be padded on the left or on
3031      the right.  Set PADDING_CORRECTION to the number of padding
3032      bits needed on the left side.
3033 
3034      In most ABIs, the structure will be returned at the least significant
3035      end of the register, which translates to right padding on little-endian
3036      targets and left padding on big-endian targets.  The opposite
3037      holds if the structure is returned at the most significant
3038      end of the register.  */
3039   if (bytes % UNITS_PER_WORD != 0
3040       && (targetm.calls.return_in_msb (TREE_TYPE (src))
3041 	  ? !BYTES_BIG_ENDIAN
3042 	  : BYTES_BIG_ENDIAN))
3043     padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
3044 					   * BITS_PER_UNIT));
3045 
3046   n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3047   dst_words = XALLOCAVEC (rtx, n_regs);
3048   bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
3049   min_mode = smallest_int_mode_for_size (bitsize);
3050 
3051   /* Copy the structure BITSIZE bits at a time.  */
3052   for (bitpos = 0, xbitpos = padding_correction;
3053        bitpos < bytes * BITS_PER_UNIT;
3054        bitpos += bitsize, xbitpos += bitsize)
3055     {
3056       /* We need a new destination pseudo each time xbitpos is
3057 	 on a word boundary and when xbitpos == padding_correction
3058 	 (the first time through).  */
3059       if (xbitpos % BITS_PER_WORD == 0
3060 	  || xbitpos == padding_correction)
3061 	{
3062 	  /* Generate an appropriate register.  */
3063 	  dst_word = gen_reg_rtx (word_mode);
3064 	  dst_words[xbitpos / BITS_PER_WORD] = dst_word;
3065 
3066 	  /* Clear the destination before we move anything into it.  */
3067 	  emit_move_insn (dst_word, CONST0_RTX (word_mode));
3068 	}
3069 
3070       /* Find the largest integer mode that can be used to copy all or as
3071 	 many bits as possible of the structure if the target supports larger
3072 	 copies.  There are too many corner cases here w.r.t. alignments on
3073 	 the read/writes.  So if there is any padding just use single byte
3074 	 operations.  */
3075       opt_scalar_int_mode mode_iter;
3076       if (padding_correction == 0 && !STRICT_ALIGNMENT)
3077 	{
3078 	  FOR_EACH_MODE_FROM (mode_iter, min_mode)
3079 	    {
3080 	      unsigned int msize = GET_MODE_BITSIZE (mode_iter.require ());
3081 	      if (msize <= ((bytes * BITS_PER_UNIT) - bitpos)
3082 		  && msize <= BITS_PER_WORD)
3083 		bitsize = msize;
3084 	      else
3085 		break;
3086 	    }
3087 	}
3088 
3089       /* We need a new source operand each time bitpos is on a word
3090 	 boundary.  */
3091       if (bitpos % BITS_PER_WORD == 0)
3092 	src_word = operand_subword_force (x, bitpos / BITS_PER_WORD, BLKmode);
3093 
3094       /* Use bitpos for the source extraction (left justified) and
3095 	 xbitpos for the destination store (right justified).  */
3096       store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
3097 		       0, 0, word_mode,
3098 		       extract_bit_field (src_word, bitsize,
3099 					  bitpos % BITS_PER_WORD, 1,
3100 					  NULL_RTX, word_mode, word_mode,
3101 					  false, NULL),
3102 		       false);
3103     }
3104 
3105   if (mode == BLKmode)
3106     {
3107       /* Find the smallest integer mode large enough to hold the
3108 	 entire structure.  */
3109       opt_scalar_int_mode mode_iter;
3110       FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
3111 	if (GET_MODE_SIZE (mode_iter.require ()) >= bytes)
3112 	  break;
3113 
3114       /* A suitable mode should have been found.  */
3115       mode = mode_iter.require ();
3116     }
3117 
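  /* Build the result in a register of at least word size so that
     operand_subword below can address each word collected in DST_WORDS.  */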
3118   if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
3119     dst_mode = word_mode;
3120   else
3121     dst_mode = mode;
3122   dst = gen_reg_rtx (dst_mode);
3123 
3124   for (i = 0; i < n_regs; i++)
3125     emit_move_insn (operand_subword (dst, i, 0, dst_mode), dst_words[i]);
3126 
3127   if (mode != dst_mode)
3128     dst = gen_lowpart (mode, dst);
3129 
3130   return dst;
3131 }
3132 
3133 /* Add a USE expression for REG to the (possibly empty) list pointed
3134    to by CALL_FUSAGE.  REG must denote a hard register.  */
3135 
3136 void
3137 use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
3138 {
3139   gcc_assert (REG_P (reg));
3140 
3141   if (!HARD_REGISTER_P (reg))
3142     return;
3143 
3144   *call_fusage
3145     = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
3146 }
3147 
3148 /* Add a CLOBBER expression for REG to the (possibly empty) list pointed
3149    to by CALL_FUSAGE.  REG must denote a hard register.  */
3150 
3151 void
3152 clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
3153 {
3154   gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);
3155 
3156   *call_fusage
3157     = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
3158 }
3159 
3160 /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
3161    starting at REGNO.  All of these registers must be hard registers.  */
3162 
3163 void
3164 use_regs (rtx *call_fusage, int regno, int nregs)
3165 {
3166   int i;
3167 
3168   gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);
3169 
3170   for (i = 0; i < nregs; i++)
3171     use_reg (call_fusage, regno_reg_rtx[regno + i]);
3172 }
3173 
3174 /* Add USE expressions to *CALL_FUSAGE for each REG contained in the
3175    PARALLEL REGS.  This is for calls that pass values in multiple
3176    non-contiguous locations.  The Irix 6 ABI has examples of this.  */
3177 
3178 void
3179 use_group_regs (rtx *call_fusage, rtx regs)
3180 {
3181   int i;
3182 
3183   for (i = 0; i < XVECLEN (regs, 0); i++)
3184     {
3185       rtx reg = XEXP (XVECEXP (regs, 0, i), 0);
3186 
3187       /* A NULL entry means the parameter goes both on the stack and in
3188 	 registers.  This can also be a MEM for targets that pass values
3189 	 partially on the stack and partially in registers.  */
3190       if (reg != 0 && REG_P (reg))
3191 	use_reg (call_fusage, reg);
3192     }
3193 }
3194 
3195 /* Return the defining gimple statement for SSA_NAME NAME if it is an
3196    assignment and the code of the expression on the RHS is CODE.  Return
3197    NULL otherwise.  */
3198 
3199 static gimple *
3200 get_def_for_expr (tree name, enum tree_code code)
3201 {
3202   gimple *def_stmt;
3203 
3204   if (TREE_CODE (name) != SSA_NAME)
3205     return NULL;
3206 
3207   def_stmt = get_gimple_for_ssa_name (name);
3208   if (!def_stmt
3209       || gimple_assign_rhs_code (def_stmt) != code)
3210     return NULL;
3211 
3212   return def_stmt;
3213 }
3214 
3215 /* Return the defining gimple statement for SSA_NAME NAME if it is an
3216    assignment and the class of the expression on the RHS is TCLASS.  Return
3217    NULL otherwise.  */
3218 
3219 static gimple *
3220 get_def_for_expr_class (tree name, enum tree_code_class tclass)
3221 {
3222   gimple *def_stmt;
3223 
3224   if (TREE_CODE (name) != SSA_NAME)
3225     return NULL;
3226 
3227   def_stmt = get_gimple_for_ssa_name (name);
3228   if (!def_stmt
3229       || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
3230     return NULL;
3231 
3232   return def_stmt;
3233 }
3234 
3235 /* Write zeros through the storage of OBJECT.  If OBJECT has BLKmode, SIZE is
3236    its length in bytes.  */
3237 
3238 rtx
3239 clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
3240 		     unsigned int expected_align, HOST_WIDE_INT expected_size,
3241 		     unsigned HOST_WIDE_INT min_size,
3242 		     unsigned HOST_WIDE_INT max_size,
3243 		     unsigned HOST_WIDE_INT probable_max_size,
3244 		     unsigned ctz_size)
3245 {
3246   machine_mode mode = GET_MODE (object);
3247   unsigned int align;
3248 
3249   gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);
3250 
3251   /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
3252      just move a zero.  Otherwise, do this a piece at a time.  */
3253   poly_int64 size_val;
3254   if (mode != BLKmode
3255       && poly_int_rtx_p (size, &size_val)
3256       && known_eq (size_val, GET_MODE_SIZE (mode)))
3257     {
3258       rtx zero = CONST0_RTX (mode);
3259       if (zero != NULL)
3260 	{
3261 	  emit_move_insn (object, zero);
3262 	  return NULL;
3263 	}
3264 
3265       if (COMPLEX_MODE_P (mode))
3266 	{
3267 	  zero = CONST0_RTX (GET_MODE_INNER (mode));
3268 	  if (zero != NULL)
3269 	    {
3270 	      write_complex_part (object, zero, 0);
3271 	      write_complex_part (object, zero, 1);
3272 	      return NULL;
3273 	    }
3274 	}
3275     }
3276 
3277   if (size == const0_rtx)
3278     return NULL;
3279 
3280   align = MEM_ALIGN (object);
3281 
3282   if (CONST_INT_P (size)
3283       && targetm.use_by_pieces_infrastructure_p (INTVAL (size), align,
3284 						 CLEAR_BY_PIECES,
3285 						 optimize_insn_for_speed_p ()))
3286     clear_by_pieces (object, INTVAL (size), align);
3287   else if (set_storage_via_setmem (object, size, const0_rtx, align,
3288 				   expected_align, expected_size,
3289 				   min_size, max_size, probable_max_size))
3290     ;
3291   else if (try_store_by_multiple_pieces (object, size, ctz_size,
3292 					 min_size, max_size,
3293 					 NULL_RTX, 0, align))
3294     ;
3295   else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
3296     return set_storage_via_libcall (object, size, const0_rtx,
3297 				    method == BLOCK_OP_TAILCALL);
3298   else
3299     gcc_unreachable ();
3300 
3301   return NULL;
3302 }
3303 
3304 rtx
3305 clear_storage (rtx object, rtx size, enum block_op_methods method)
3306 {
3307   unsigned HOST_WIDE_INT max, min = 0;
3308   if (GET_CODE (size) == CONST_INT)
3309     min = max = UINTVAL (size);
3310   else
3311     max = GET_MODE_MASK (GET_MODE (size));
3312   return clear_storage_hints (object, size, method, 0, -1, min, max, max, 0);
3313 }
3314 
3315 
3316 /* A subroutine of clear_storage.  Expand a call to memset.
3317    Return the return value of memset, 0 otherwise.  */
3318 
3319 rtx
3320 set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
3321 {
3322   tree call_expr, fn, object_tree, size_tree, val_tree;
3323   machine_mode size_mode;
3324 
3325   object = copy_addr_to_reg (XEXP (object, 0));
3326   object_tree = make_tree (ptr_type_node, object);
3327 
3328   if (!CONST_INT_P (val))
3329     val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
3330   val_tree = make_tree (integer_type_node, val);
3331 
3332   size_mode = TYPE_MODE (sizetype);
3333   size = convert_to_mode (size_mode, size, 1);
3334   size = copy_to_mode_reg (size_mode, size);
3335   size_tree = make_tree (sizetype, size);
3336 
3337   /* It is incorrect to use the libcall calling conventions for calls to
3338      memset because it can be provided by the user.  */
3339   fn = builtin_decl_implicit (BUILT_IN_MEMSET);
3340   call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
3341   CALL_EXPR_TAILCALL (call_expr) = tailcall;
3342 
3343   return expand_call (call_expr, NULL_RTX, false);
3344 }
3345 
3346 /* Expand a setmem pattern; return true if successful.  */
3347 
3348 bool
3349 set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
3350 			unsigned int expected_align, HOST_WIDE_INT expected_size,
3351 			unsigned HOST_WIDE_INT min_size,
3352 			unsigned HOST_WIDE_INT max_size,
3353 			unsigned HOST_WIDE_INT probable_max_size)
3354 {
3355   /* Try the most limited insn first, because there's no point
3356      including more than one in the machine description unless
3357      the more limited one has some advantage.  */
3358 
3359   if (expected_align < align)
3360     expected_align = align;
3361   if (expected_size != -1)
3362     {
3363       if ((unsigned HOST_WIDE_INT)expected_size > max_size)
3364 	expected_size = max_size;
3365       if ((unsigned HOST_WIDE_INT)expected_size < min_size)
3366 	expected_size = min_size;
3367     }
3368 
3369   opt_scalar_int_mode mode_iter;
3370   FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
3371     {
3372       scalar_int_mode mode = mode_iter.require ();
3373       enum insn_code code = direct_optab_handler (setmem_optab, mode);
3374 
3375       if (code != CODE_FOR_nothing
3376 	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3377 	     here because if SIZE is less than the mode mask, as it is
3378 	     returned by the macro, it will definitely be less than the
3379 	     actual mode mask.  Since SIZE is within the Pmode address
3380 	     space, we limit MODE to Pmode.  */
3381 	  && ((CONST_INT_P (size)
3382 	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
3383 		   <= (GET_MODE_MASK (mode) >> 1)))
3384 	      || max_size <= (GET_MODE_MASK (mode) >> 1)
3385 	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
3386 	{
3387 	  class expand_operand ops[9];
3388 	  unsigned int nops;
3389 
3390 	  nops = insn_data[(int) code].n_generator_args;
3391 	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
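	  /* The operand layout mirrors the setmem pattern: destination,
	     length, fill value and alignment are always present; 6-operand
	     forms add the expected-alignment and expected-size hints,
	     8-operand forms add the minimum and maximum size, and a ninth
	     operand carries the probable maximum size.  */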
3392 
3393 	  create_fixed_operand (&ops[0], object);
3394 	  /* The check above guarantees that this size conversion is valid.  */
3395 	  create_convert_operand_to (&ops[1], size, mode, true);
3396 	  create_convert_operand_from (&ops[2], val, byte_mode, true);
3397 	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
3398 	  if (nops >= 6)
3399 	    {
3400 	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
3401 	      create_integer_operand (&ops[5], expected_size);
3402 	    }
3403 	  if (nops >= 8)
3404 	    {
3405 	      create_integer_operand (&ops[6], min_size);
3406 	      /* If we cannot represent the maximal size,
3407 		 make parameter NULL.  */
3408 	      if ((HOST_WIDE_INT) max_size != -1)
3409 	        create_integer_operand (&ops[7], max_size);
3410 	      else
3411 		create_fixed_operand (&ops[7], NULL);
3412 	    }
3413 	  if (nops == 9)
3414 	    {
3415 	      /* If we cannot represent the maximal size,
3416 		 make parameter NULL.  */
3417 	      if ((HOST_WIDE_INT) probable_max_size != -1)
3418 	        create_integer_operand (&ops[8], probable_max_size);
3419 	      else
3420 		create_fixed_operand (&ops[8], NULL);
3421 	    }
3422 	  if (maybe_expand_insn (code, nops, ops))
3423 	    return true;
3424 	}
3425     }
3426 
3427   return false;
3428 }
3429 
3430 
3431 /* Write to one of the components of the complex value CPLX.  Write VAL to
3432    the real part if IMAG_P is false, and the imaginary part if it's true.  */
3433 
3434 void
3435 write_complex_part (rtx cplx, rtx val, bool imag_p)
3436 {
3437   machine_mode cmode;
3438   scalar_mode imode;
3439   unsigned ibitsize;
3440 
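  /* A CONCAT holds the real part as operand 0 and the imaginary part as
     operand 1, so the requested part can be written directly.  */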
3441   if (GET_CODE (cplx) == CONCAT)
3442     {
3443       emit_move_insn (XEXP (cplx, imag_p), val);
3444       return;
3445     }
3446 
3447   cmode = GET_MODE (cplx);
3448   imode = GET_MODE_INNER (cmode);
3449   ibitsize = GET_MODE_BITSIZE (imode);
3450 
3451   /* For MEMs simplify_gen_subreg may generate an invalid new address
3452      because, e.g., the original address is considered mode-dependent
3453      by the target, which restricts simplify_subreg from invoking
3454      adjust_address_nv.  Instead of preparing fallback support for an
3455      invalid address, we call adjust_address_nv directly.  */
3456   if (MEM_P (cplx))
3457     {
3458       emit_move_insn (adjust_address_nv (cplx, imode,
3459 					 imag_p ? GET_MODE_SIZE (imode) : 0),
3460 		      val);
3461       return;
3462     }
3463 
3464   /* If the sub-object is at least word sized, then we know that subregging
3465      will work.  This special case is important, since store_bit_field
3466      wants to operate on integer modes, and there's rarely an OImode to
3467      correspond to TCmode.  */
3468   if (ibitsize >= BITS_PER_WORD
3469       /* For hard regs we have exact predicates.  Assume we can split
3470 	 the original object if it spans an even number of hard regs.
3471 	 This special case is important for SCmode on 64-bit platforms
3472 	 where the natural size of floating-point regs is 32-bit.  */
3473       || (REG_P (cplx)
3474 	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3475 	  && REG_NREGS (cplx) % 2 == 0))
3476     {
3477       rtx part = simplify_gen_subreg (imode, cplx, cmode,
3478 				      imag_p ? GET_MODE_SIZE (imode) : 0);
3479       if (part)
3480         {
3481 	  emit_move_insn (part, val);
3482 	  return;
3483 	}
3484       else
3485 	/* simplify_gen_subreg may fail for sub-word MEMs.  */
3486 	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3487     }
3488 
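  /* Otherwise store the requested part as a bitfield at the matching
     offset within CPLX.  */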
3489   store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
3490 		   false);
3491 }
3492 
3493 /* Extract one of the components of the complex value CPLX.  Extract the
3494    real part if IMAG_P is false, and the imaginary part if it's true.  */
3495 
3496 rtx
3497 read_complex_part (rtx cplx, bool imag_p)
3498 {
3499   machine_mode cmode;
3500   scalar_mode imode;
3501   unsigned ibitsize;
3502 
3503   if (GET_CODE (cplx) == CONCAT)
3504     return XEXP (cplx, imag_p);
3505 
3506   cmode = GET_MODE (cplx);
3507   imode = GET_MODE_INNER (cmode);
3508   ibitsize = GET_MODE_BITSIZE (imode);
3509 
3510   /* Special case reads from complex constants that got spilled to memory.  */
3511   if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
3512     {
3513       tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
3514       if (decl && TREE_CODE (decl) == COMPLEX_CST)
3515 	{
3516 	  tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
3517 	  if (CONSTANT_CLASS_P (part))
3518 	    return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
3519 	}
3520     }
3521 
3522   /* For MEMs simplify_gen_subreg may generate an invalid new address
3523      because, e.g., the original address is considered mode-dependent
3524      by the target, which restricts simplify_subreg from invoking
3525      adjust_address_nv.  Instead of preparing fallback support for an
3526      invalid address, we call adjust_address_nv directly.  */
3527   if (MEM_P (cplx))
3528     return adjust_address_nv (cplx, imode,
3529 			      imag_p ? GET_MODE_SIZE (imode) : 0);
3530 
3531   /* If the sub-object is at least word sized, then we know that subregging
3532      will work.  This special case is important, since extract_bit_field
3533      wants to operate on integer modes, and there's rarely an OImode to
3534      correspond to TCmode.  */
3535   if (ibitsize >= BITS_PER_WORD
3536       /* For hard regs we have exact predicates.  Assume we can split
3537 	 the original object if it spans an even number of hard regs.
3538 	 This special case is important for SCmode on 64-bit platforms
3539 	 where the natural size of floating-point regs is 32-bit.  */
3540       || (REG_P (cplx)
3541 	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3542 	  && REG_NREGS (cplx) % 2 == 0))
3543     {
3544       rtx ret = simplify_gen_subreg (imode, cplx, cmode,
3545 				     imag_p ? GET_MODE_SIZE (imode) : 0);
3546       if (ret)
3547         return ret;
3548       else
3549 	/* simplify_gen_subreg may fail for sub-word MEMs.  */
3550 	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3551     }
3552 
3553   return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
3554 			    true, NULL_RTX, imode, imode, false, NULL);
3555 }
3556 
3557 /* A subroutine of emit_move_insn_1.  Yet another lowpart generator.
3558    NEW_MODE and OLD_MODE are the same size.  Return NULL if X cannot be
3559    represented in NEW_MODE.  If FORCE is true, this will never happen, as
3560    we'll force-create a SUBREG if needed.  */
3561 
3562 static rtx
3563 emit_move_change_mode (machine_mode new_mode,
3564 		       machine_mode old_mode, rtx x, bool force)
3565 {
3566   rtx ret;
3567 
3568   if (push_operand (x, GET_MODE (x)))
3569     {
3570       ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
3571       MEM_COPY_ATTRIBUTES (ret, x);
3572     }
3573   else if (MEM_P (x))
3574     {
3575       /* We don't have to worry about changing the address since the
3576 	 size in bytes is supposed to be the same.  */
3577       if (reload_in_progress)
3578 	{
3579 	  /* Copy the MEM to change the mode and move any
3580 	     substitutions from the old MEM to the new one.  */
3581 	  ret = adjust_address_nv (x, new_mode, 0);
3582 	  copy_replacements (x, ret);
3583 	}
3584       else
3585 	ret = adjust_address (x, new_mode, 0);
3586     }
3587   else
3588     {
3589       /* Note that we do want simplify_subreg's behavior of validating
3590 	 that the new mode is ok for a hard register.  If we were to use
3591 	 simplify_gen_subreg, we would create the subreg, but would
3592 	 probably run into the target not being able to implement it.  */
3593       /* Except, of course, when FORCE is true, when this is exactly what
3594 	 we want.  Which is needed for CCmodes on some targets.  */
3595       if (force)
3596 	ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
3597       else
3598 	ret = simplify_subreg (new_mode, x, old_mode, 0);
3599     }
3600 
3601   return ret;
3602 }
3603 
3604 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X using
3605    an integer mode of the same size as MODE.  Returns the instruction
3606    emitted, or NULL if such a move could not be generated.  */
3607 
3608 static rtx_insn *
3609 emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
3610 {
3611   scalar_int_mode imode;
3612   enum insn_code code;
3613 
3614   /* There must exist a mode of the exact size we require.  */
3615   if (!int_mode_for_mode (mode).exists (&imode))
3616     return NULL;
3617 
3618   /* The target must support moves in this mode.  */
3619   code = optab_handler (mov_optab, imode);
3620   if (code == CODE_FOR_nothing)
3621     return NULL;
3622 
3623   x = emit_move_change_mode (imode, mode, x, force);
3624   if (x == NULL_RTX)
3625     return NULL;
3626   y = emit_move_change_mode (imode, mode, y, force);
3627   if (y == NULL_RTX)
3628     return NULL;
3629   return emit_insn (GEN_FCN (code) (x, y));
3630 }
3631 
3632 /* A subroutine of emit_move_insn_1.  X is a push_operand in MODE.
3633    Return an equivalent MEM that does not use an auto-increment.  */
3634 
3635 rtx
3636 emit_move_resolve_push (machine_mode mode, rtx x)
3637 {
3638   enum rtx_code code = GET_CODE (XEXP (x, 0));
3639   rtx temp;
3640 
3641   poly_int64 adjust = GET_MODE_SIZE (mode);
3642 #ifdef PUSH_ROUNDING
3643   adjust = PUSH_ROUNDING (adjust);
3644 #endif
3645   if (code == PRE_DEC || code == POST_DEC)
3646     adjust = -adjust;
3647   else if (code == PRE_MODIFY || code == POST_MODIFY)
3648     {
3649       rtx expr = XEXP (XEXP (x, 0), 1);
3650 
3651       gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
3652       poly_int64 val = rtx_to_poly_int64 (XEXP (expr, 1));
3653       if (GET_CODE (expr) == MINUS)
3654 	val = -val;
3655       gcc_assert (known_eq (adjust, val) || known_eq (adjust, -val));
3656       adjust = val;
3657     }
3658 
3659   /* Do not use anti_adjust_stack, since we don't want to update
3660      stack_pointer_delta.  */
3661   temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
3662 			      gen_int_mode (adjust, Pmode), stack_pointer_rtx,
3663 			      0, OPTAB_LIB_WIDEN);
3664   if (temp != stack_pointer_rtx)
3665     emit_move_insn (stack_pointer_rtx, temp);
3666 
3667   switch (code)
3668     {
3669     case PRE_INC:
3670     case PRE_DEC:
3671     case PRE_MODIFY:
3672       temp = stack_pointer_rtx;
3673       break;
3674     case POST_INC:
3675     case POST_DEC:
3676     case POST_MODIFY:
3677       temp = plus_constant (Pmode, stack_pointer_rtx, -adjust);
3678       break;
3679     default:
3680       gcc_unreachable ();
3681     }
3682 
3683   return replace_equiv_address (x, temp);
3684 }
3685 
3686 /* A subroutine of emit_move_complex.  Generate a move from Y into X.
3687    X is known to satisfy push_operand, and MODE is known to be complex.
3688    Returns the last instruction emitted.  */
3689 
3690 rtx_insn *
3691 emit_move_complex_push (machine_mode mode, rtx x, rtx y)
3692 {
3693   scalar_mode submode = GET_MODE_INNER (mode);
3694   bool imag_first;
3695 
3696 #ifdef PUSH_ROUNDING
3697   poly_int64 submodesize = GET_MODE_SIZE (submode);
3698 
3699   /* In case we output to the stack, but the size is smaller than the
3700      machine can push exactly, we need to use move instructions.  */
3701   if (maybe_ne (PUSH_ROUNDING (submodesize), submodesize))
3702     {
3703       x = emit_move_resolve_push (mode, x);
3704       return emit_move_insn (x, y);
3705     }
3706 #endif
3707 
3708   /* Note that the real part always precedes the imag part in memory
3709      regardless of machine's endianness.  */
3710   switch (GET_CODE (XEXP (x, 0)))
3711     {
3712     case PRE_DEC:
3713     case POST_DEC:
3714       imag_first = true;
3715       break;
3716     case PRE_INC:
3717     case POST_INC:
3718       imag_first = false;
3719       break;
3720     default:
3721       gcc_unreachable ();
3722     }
3723 
3724   emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3725 		  read_complex_part (y, imag_first));
3726   return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3727 			 read_complex_part (y, !imag_first));
3728 }
3729 
3730 /* A subroutine of emit_move_complex.  Perform the move from Y to X
3731    via two moves of the parts.  Returns the last instruction emitted.  */
3732 
3733 rtx_insn *
3734 emit_move_complex_parts (rtx x, rtx y)
3735 {
3736   /* Show the output dies here.  This is necessary for SUBREGs
3737      of pseudos since we cannot track their lifetimes correctly;
3738      hard regs shouldn't appear here except as return values.  */
3739   if (!reload_completed && !reload_in_progress
3740       && REG_P (x) && !reg_overlap_mentioned_p (x, y))
3741     emit_clobber (x);
3742 
3743   write_complex_part (x, read_complex_part (y, false), false);
3744   write_complex_part (x, read_complex_part (y, true), true);
3745 
3746   return get_last_insn ();
3747 }
3748 
3749 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3750    MODE is known to be complex.  Returns the last instruction emitted.  */
3751 
3752 static rtx_insn *
3753 emit_move_complex (machine_mode mode, rtx x, rtx y)
3754 {
3755   bool try_int;
3756 
3757   /* Need to take special care for pushes, to maintain proper ordering
3758      of the data, and possibly extra padding.  */
3759   if (push_operand (x, mode))
3760     return emit_move_complex_push (mode, x, y);
3761 
3762   /* See if we can coerce the target into moving both values at once, except
3763      for floating point where we favor moving as parts if this is easy.  */
3764   if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3765       && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing
3766       && !(REG_P (x)
3767 	   && HARD_REGISTER_P (x)
3768 	   && REG_NREGS (x) == 1)
3769       && !(REG_P (y)
3770 	   && HARD_REGISTER_P (y)
3771 	   && REG_NREGS (y) == 1))
3772     try_int = false;
3773   /* Not possible if the values are inherently not adjacent.  */
3774   else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
3775     try_int = false;
3776   /* Is possible if both are registers (or subregs of registers).  */
3777   else if (register_operand (x, mode) && register_operand (y, mode))
3778     try_int = true;
3779   /* If one of the operands is a memory, and alignment constraints
3780      are friendly enough, we may be able to do combined memory operations.
3781      We do not attempt this if Y is a constant because that combination is
3782      usually better with the by-parts thing below.  */
3783   else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
3784 	   && (!STRICT_ALIGNMENT
3785 	       || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
3786     try_int = true;
3787   else
3788     try_int = false;
3789 
3790   if (try_int)
3791     {
3792       rtx_insn *ret;
3793 
3794       /* For memory to memory moves, optimal behavior can be had with the
3795 	 existing block move logic.  But use normal expansion if optimizing
3796 	 for size.  */
3797       if (MEM_P (x) && MEM_P (y))
3798 	{
3799 	  emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
3800 			   (optimize_insn_for_speed_p()
3801 			    ? BLOCK_OP_NO_LIBCALL : BLOCK_OP_NORMAL));
3802 	  return get_last_insn ();
3803 	}
3804 
3805       ret = emit_move_via_integer (mode, x, y, true);
3806       if (ret)
3807 	return ret;
3808     }
3809 
3810   return emit_move_complex_parts (x, y);
3811 }
3812 
3813 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3814    MODE is known to be MODE_CC.  Returns the last instruction emitted.  */
3815 
3816 static rtx_insn *
3817 emit_move_ccmode (machine_mode mode, rtx x, rtx y)
3818 {
3819   rtx_insn *ret;
3820 
3821   /* Assume all MODE_CC modes are equivalent; if we have movcc, use it.  */
3822   if (mode != CCmode)
3823     {
3824       enum insn_code code = optab_handler (mov_optab, CCmode);
3825       if (code != CODE_FOR_nothing)
3826 	{
3827 	  x = emit_move_change_mode (CCmode, mode, x, true);
3828 	  y = emit_move_change_mode (CCmode, mode, y, true);
3829 	  return emit_insn (GEN_FCN (code) (x, y));
3830 	}
3831     }
3832 
3833   /* Otherwise, find the MODE_INT mode of the same width.  */
3834   ret = emit_move_via_integer (mode, x, y, false);
3835   gcc_assert (ret != NULL);
3836   return ret;
3837 }
3838 
3839 /* Return true if word I of OP lies entirely in the
3840    undefined bits of a paradoxical subreg.  */
3841 
3842 static bool
3843 undefined_operand_subword_p (const_rtx op, int i)
3844 {
3845   if (GET_CODE (op) != SUBREG)
3846     return false;
3847   machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3848   poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
3849   return (known_ge (offset, GET_MODE_SIZE (innermostmode))
3850 	  || known_le (offset, -UNITS_PER_WORD));
3851 }
3852 
3853 /* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
3854    MODE is any multi-word or full-word mode that lacks a move_insn
3855    pattern.  Note that you will get better code if you define such
3856    patterns, even if they must turn into multiple assembler instructions.  */
3857 
3858 static rtx_insn *
3859 emit_move_multi_word (machine_mode mode, rtx x, rtx y)
3860 {
3861   rtx_insn *last_insn = 0;
3862   rtx_insn *seq;
3863   rtx inner;
3864   bool need_clobber;
3865   int i, mode_size;
3866 
3867   /* This function can only handle cases where the number of words is
3868      known at compile time.  */
3869   mode_size = GET_MODE_SIZE (mode).to_constant ();
3870   gcc_assert (mode_size >= UNITS_PER_WORD);
3871 
3872   /* If X is a push on the stack, do the push now and replace
3873      X with a reference to the stack pointer.  */
3874   if (push_operand (x, mode))
3875     x = emit_move_resolve_push (mode, x);
3876 
3877   /* If we are in reload, see if either operand is a MEM whose address
3878      is scheduled for replacement.  */
3879   if (reload_in_progress && MEM_P (x)
3880       && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
3881     x = replace_equiv_address_nv (x, inner);
3882   if (reload_in_progress && MEM_P (y)
3883       && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
3884     y = replace_equiv_address_nv (y, inner);
3885 
3886   start_sequence ();
3887 
3888   need_clobber = false;
3889   for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
3890     {
3891       /* Do not generate code for a move if it would go entirely
3892 	 to the non-existing bits of a paradoxical subreg.  */
3893       if (undefined_operand_subword_p (x, i))
3894 	continue;
3895 
3896       rtx xpart = operand_subword (x, i, 1, mode);
3897       rtx ypart;
3898 
3899       /* Do not generate code for a move if it would come entirely
3900 	 from the undefined bits of a paradoxical subreg.  */
3901       if (undefined_operand_subword_p (y, i))
3902 	continue;
3903 
3904       ypart = operand_subword (y, i, 1, mode);
3905 
3906       /* If we can't get a part of Y, put Y into memory if it is a
3907 	 constant.  Otherwise, force it into a register.  Then we must
3908 	 be able to get a part of Y.  */
3909       if (ypart == 0 && CONSTANT_P (y))
3910 	{
3911 	  y = use_anchored_address (force_const_mem (mode, y));
3912 	  ypart = operand_subword (y, i, 1, mode);
3913 	}
3914       else if (ypart == 0)
3915 	ypart = operand_subword_force (y, i, mode);
3916 
3917       gcc_assert (xpart && ypart);
3918 
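      /* A SUBREG destination means only part of the underlying pseudo is
	 written here; note it so a clobber of the whole destination can be
	 emitted below (see the comment there).  */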
3919       need_clobber |= (GET_CODE (xpart) == SUBREG);
3920 
3921       last_insn = emit_move_insn (xpart, ypart);
3922     }
3923 
3924   seq = get_insns ();
3925   end_sequence ();
3926 
3927   /* Show the output dies here.  This is necessary for SUBREGs
3928      of pseudos since we cannot track their lifetimes correctly;
3929      hard regs shouldn't appear here except as return values.
3930      We never want to emit such a clobber after reload.  */
3931   if (x != y
3932       && ! (reload_in_progress || reload_completed)
3933       && need_clobber != 0)
3934     emit_clobber (x);
3935 
3936   emit_insn (seq);
3937 
3938   return last_insn;
3939 }
3940 
3941 /* Low level part of emit_move_insn.
3942    Called just like emit_move_insn, but assumes X and Y
3943    are basically valid.  */
3944 
3945 rtx_insn *
3946 emit_move_insn_1 (rtx x, rtx y)
3947 {
3948   machine_mode mode = GET_MODE (x);
3949   enum insn_code code;
3950 
3951   gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
3952 
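  /* If the backend provides a move pattern for MODE, use it directly.  */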
3953   code = optab_handler (mov_optab, mode);
3954   if (code != CODE_FOR_nothing)
3955     return emit_insn (GEN_FCN (code) (x, y));
3956 
3957   /* Expand complex moves by moving real part and imag part.  */
3958   if (COMPLEX_MODE_P (mode))
3959     return emit_move_complex (mode, x, y);
3960 
3961   if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT
3962       || ALL_FIXED_POINT_MODE_P (mode))
3963     {
3964       rtx_insn *result = emit_move_via_integer (mode, x, y, true);
3965 
3966       /* If we can't find an integer mode, use multi words.  */
3967       if (result)
3968 	return result;
3969       else
3970 	return emit_move_multi_word (mode, x, y);
3971     }
3972 
3973   if (GET_MODE_CLASS (mode) == MODE_CC)
3974     return emit_move_ccmode (mode, x, y);
3975 
3976   /* Try using a move pattern for the corresponding integer mode.  This is
3977      only safe when simplify_subreg can convert MODE constants into integer
3978      constants.  At present, it can only do this reliably if the value
3979      fits within a HOST_WIDE_INT.  */
3980   if (!CONSTANT_P (y)
3981       || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
3982     {
3983       rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
3984 
3985       if (ret)
3986 	{
3987 	  if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
3988 	    return ret;
3989 	}
3990     }
3991 
3992   return emit_move_multi_word (mode, x, y);
3993 }
3994 
3995 /* Generate code to copy Y into X.
3996    Both Y and X must have the same mode, except that
3997    Y can be a constant with VOIDmode.
3998    This mode cannot be BLKmode; use emit_block_move for that.
3999 
4000    Return the last instruction emitted.  */
4001 
4002 rtx_insn *
4003 emit_move_insn (rtx x, rtx y)
4004 {
4005   machine_mode mode = GET_MODE (x);
4006   rtx y_cst = NULL_RTX;
4007   rtx_insn *last_insn;
4008   rtx set;
4009 
4010   gcc_assert (mode != BLKmode
4011 	      && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
4012 
4013   /* If we have a copy that looks like one of the following patterns:
4014        (set (subreg:M1 (reg:M2 ...)) (subreg:M1 (reg:M2 ...)))
4015        (set (subreg:M1 (reg:M2 ...)) (mem:M1 ADDR))
4016        (set (mem:M1 ADDR) (subreg:M1 (reg:M2 ...)))
4017        (set (subreg:M1 (reg:M2 ...)) (constant C))
4018      where mode M1 is equal in size to M2, try to detect whether the
4019      mode change involves an implicit round trip through memory.
4020      If so, see if we can avoid that by removing the subregs and
4021      doing the move in mode M2 instead.  */
4022 
4023   rtx x_inner = NULL_RTX;
4024   rtx y_inner = NULL_RTX;
4025 
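  /* A SUBREG is a candidate when its inner register has the same size as
     the outer mode and the inner mode has its own move pattern.  */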
4026   auto candidate_subreg_p = [&](rtx subreg) {
4027     return (REG_P (SUBREG_REG (subreg))
4028 	    && known_eq (GET_MODE_SIZE (GET_MODE (SUBREG_REG (subreg))),
4029 			 GET_MODE_SIZE (GET_MODE (subreg)))
4030 	    && optab_handler (mov_optab, GET_MODE (SUBREG_REG (subreg)))
4031 	       != CODE_FOR_nothing);
4032   };
4033 
4034   auto candidate_mem_p = [&](machine_mode innermode, rtx mem) {
4035     return (!targetm.can_change_mode_class (innermode, GET_MODE (mem), ALL_REGS)
4036 	    && !push_operand (mem, GET_MODE (mem))
4037 	    /* Not a candidate if innermode requires too much alignment.  */
4038 	    && (MEM_ALIGN (mem) >= GET_MODE_ALIGNMENT (innermode)
4039 		|| targetm.slow_unaligned_access (GET_MODE (mem),
4040 						  MEM_ALIGN (mem))
4041 		|| !targetm.slow_unaligned_access (innermode,
4042 						   MEM_ALIGN (mem))));
4043   };
4044 
4045   if (SUBREG_P (x) && candidate_subreg_p (x))
4046     x_inner = SUBREG_REG (x);
4047 
4048   if (SUBREG_P (y) && candidate_subreg_p (y))
4049     y_inner = SUBREG_REG (y);
4050 
4051   if (x_inner != NULL_RTX
4052       && y_inner != NULL_RTX
4053       && GET_MODE (x_inner) == GET_MODE (y_inner)
4054       && !targetm.can_change_mode_class (GET_MODE (x_inner), mode, ALL_REGS))
4055     {
4056       x = x_inner;
4057       y = y_inner;
4058       mode = GET_MODE (x_inner);
4059     }
4060   else if (x_inner != NULL_RTX
4061 	   && MEM_P (y)
4062 	   && candidate_mem_p (GET_MODE (x_inner), y))
4063     {
4064       x = x_inner;
4065       y = adjust_address (y, GET_MODE (x_inner), 0);
4066       mode = GET_MODE (x_inner);
4067     }
4068   else if (y_inner != NULL_RTX
4069 	   && MEM_P (x)
4070 	   && candidate_mem_p (GET_MODE (y_inner), x))
4071     {
4072       x = adjust_address (x, GET_MODE (y_inner), 0);
4073       y = y_inner;
4074       mode = GET_MODE (y_inner);
4075     }
4076   else if (x_inner != NULL_RTX
4077 	   && CONSTANT_P (y)
4078 	   && !targetm.can_change_mode_class (GET_MODE (x_inner),
4079 					      mode, ALL_REGS)
4080 	   && (y_inner = simplify_subreg (GET_MODE (x_inner), y, mode, 0)))
4081     {
4082       x = x_inner;
4083       y = y_inner;
4084       mode = GET_MODE (x_inner);
4085     }
4086 
4087   if (CONSTANT_P (y))
4088     {
4089       if (optimize
4090 	  && SCALAR_FLOAT_MODE_P (GET_MODE (x))
4091 	  && (last_insn = compress_float_constant (x, y)))
4092 	return last_insn;
4093 
4094       y_cst = y;
4095 
4096       if (!targetm.legitimate_constant_p (mode, y))
4097 	{
4098 	  y = force_const_mem (mode, y);
4099 
4100 	  /* If the target's cannot_force_const_mem prevented the spill,
4101 	     assume that the target's move expanders will also take care
4102 	     of the non-legitimate constant.  */
4103 	  if (!y)
4104 	    y = y_cst;
4105 	  else
4106 	    y = use_anchored_address (y);
4107 	}
4108     }
4109 
4110   /* If X or Y are memory references, verify that their addresses are valid
4111      for the machine.  */
4112   if (MEM_P (x)
4113       && (! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4114 					 MEM_ADDR_SPACE (x))
4115 	  && ! push_operand (x, GET_MODE (x))))
4116     x = validize_mem (x);
4117 
4118   if (MEM_P (y)
4119       && ! memory_address_addr_space_p (GET_MODE (y), XEXP (y, 0),
4120 					MEM_ADDR_SPACE (y)))
4121     y = validize_mem (y);
4122 
4123   gcc_assert (mode != BLKmode);
4124 
4125   last_insn = emit_move_insn_1 (x, y);
4126 
4127   if (y_cst && REG_P (x)
4128       && (set = single_set (last_insn)) != NULL_RTX
4129       && SET_DEST (set) == x
4130       && ! rtx_equal_p (y_cst, SET_SRC (set)))
4131     set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
4132 
4133   return last_insn;
4134 }
4135 
4136 /* Generate the body of an instruction to copy Y into X.
4137    It may be a list of insns, if one insn isn't enough.  */
4138 
4139 rtx_insn *
4140 gen_move_insn (rtx x, rtx y)
4141 {
4142   rtx_insn *seq;
4143 
4144   start_sequence ();
4145   emit_move_insn_1 (x, y);
4146   seq = get_insns ();
4147   end_sequence ();
4148   return seq;
4149 }
4150 
4151 /* If Y is representable exactly in a narrower mode, and the target can
4152    perform the extension directly from constant or memory, then emit the
4153    move as an extension.  */
4154 
4155 static rtx_insn *
4156 compress_float_constant (rtx x, rtx y)
4157 {
4158   machine_mode dstmode = GET_MODE (x);
4159   machine_mode orig_srcmode = GET_MODE (y);
4160   machine_mode srcmode;
4161   const REAL_VALUE_TYPE *r;
4162   int oldcost, newcost;
4163   bool speed = optimize_insn_for_speed_p ();
4164 
4165   r = CONST_DOUBLE_REAL_VALUE (y);
4166 
4167   if (targetm.legitimate_constant_p (dstmode, y))
4168     oldcost = set_src_cost (y, orig_srcmode, speed);
4169   else
4170     oldcost = set_src_cost (force_const_mem (dstmode, y), dstmode, speed);
4171 
4172   FOR_EACH_MODE_UNTIL (srcmode, orig_srcmode)
4173     {
4174       enum insn_code ic;
4175       rtx trunc_y;
4176       rtx_insn *last_insn;
4177 
4178       /* Skip if the target can't extend this way.  */
4179       ic = can_extend_p (dstmode, srcmode, 0);
4180       if (ic == CODE_FOR_nothing)
4181 	continue;
4182 
4183       /* Skip if the narrowed value isn't exact.  */
4184       if (! exact_real_truncate (srcmode, r))
4185 	continue;
4186 
4187       trunc_y = const_double_from_real_value (*r, srcmode);
4188 
4189       if (targetm.legitimate_constant_p (srcmode, trunc_y))
4190 	{
4191 	  /* Skip if the target needs extra instructions to perform
4192 	     the extension.  */
4193 	  if (!insn_operand_matches (ic, 1, trunc_y))
4194 	    continue;
4195 	  /* This is valid, but may not be cheaper than the original. */
4196 	  newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
4197 				  dstmode, speed);
4198 	  if (oldcost < newcost)
4199 	    continue;
4200 	}
4201       else if (float_extend_from_mem[dstmode][srcmode])
4202 	{
4203 	  trunc_y = force_const_mem (srcmode, trunc_y);
4204 	  /* This is valid, but may not be cheaper than the original. */
4205 	  newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
4206 				  dstmode, speed);
4207 	  if (oldcost < newcost)
4208 	    continue;
4209 	  trunc_y = validize_mem (trunc_y);
4210 	}
4211       else
4212 	continue;
4213 
4214       /* For CSE's benefit, force the compressed constant pool entry
4215 	 into a new pseudo.  This constant may be used in different modes,
4216 	 and if not, combine will put things back together for us.  */
4217       trunc_y = force_reg (srcmode, trunc_y);
4218 
4219       /* If x is a hard register, perform the extension into a pseudo,
4220 	 so that e.g. stack realignment code is aware of it.  */
4221       rtx target = x;
4222       if (REG_P (x) && HARD_REGISTER_P (x))
4223 	target = gen_reg_rtx (dstmode);
4224 
4225       emit_unop_insn (ic, target, trunc_y, UNKNOWN);
4226       last_insn = get_last_insn ();
4227 
4228       if (REG_P (target))
4229 	set_unique_reg_note (last_insn, REG_EQUAL, y);
4230 
4231       if (target != x)
4232 	return emit_move_insn (x, target);
4233       return last_insn;
4234     }
4235 
4236   return NULL;
4237 }
4238 
4239 /* Pushing data onto the stack.  */
4240 
4241 /* Push a block of length SIZE (perhaps variable)
4242    and return an rtx to address the beginning of the block.
4243    The value may be virtual_outgoing_args_rtx.
4244 
4245    EXTRA is the number of bytes of padding to push in addition to SIZE.
4246    BELOW nonzero means this padding comes at low addresses;
4247    otherwise, the padding comes at high addresses.  */
4248 
4249 rtx
4250 push_block (rtx size, poly_int64 extra, int below)
4251 {
4252   rtx temp;
4253 
4254   size = convert_modes (Pmode, ptr_mode, size, 1);
4255   if (CONSTANT_P (size))
4256     anti_adjust_stack (plus_constant (Pmode, size, extra));
4257   else if (REG_P (size) && known_eq (extra, 0))
4258     anti_adjust_stack (size);
4259   else
4260     {
4261       temp = copy_to_mode_reg (Pmode, size);
4262       if (maybe_ne (extra, 0))
4263 	temp = expand_binop (Pmode, add_optab, temp,
4264 			     gen_int_mode (extra, Pmode),
4265 			     temp, 0, OPTAB_LIB_WIDEN);
4266       anti_adjust_stack (temp);
4267     }
4268 
4269   if (STACK_GROWS_DOWNWARD)
4270     {
4271       temp = virtual_outgoing_args_rtx;
4272       if (maybe_ne (extra, 0) && below)
4273 	temp = plus_constant (Pmode, temp, extra);
4274     }
4275   else
4276     {
4277       poly_int64 csize;
4278       if (poly_int_rtx_p (size, &csize))
4279 	temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
4280 			      -csize - (below ? 0 : extra));
4281       else if (maybe_ne (extra, 0) && !below)
4282 	temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
4283 			     negate_rtx (Pmode, plus_constant (Pmode, size,
4284 							       extra)));
4285       else
4286 	temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
4287 			     negate_rtx (Pmode, size));
4288     }
4289 
4290   return memory_address (NARROWEST_INT_MODE, temp);
4291 }
4292 
4293 /* A utility routine that returns the base of an auto-inc memory, or NULL.  */
4294 
4295 static rtx
4296 mem_autoinc_base (rtx mem)
4297 {
4298   if (MEM_P (mem))
4299     {
4300       rtx addr = XEXP (mem, 0);
4301       if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4302 	return XEXP (addr, 0);
4303     }
4304   return NULL;
4305 }
4306 
4307 /* A utility routine used here, in reload, and in try_split.  The insns
4308    after PREV up to and including LAST are known to adjust the stack,
4309    with a final value of END_ARGS_SIZE.  Iterate backward from LAST
4310    placing notes as appropriate.  PREV may be NULL, indicating the
4311    entire insn sequence prior to LAST should be scanned.
4312 
4313    The set of allowed stack pointer modifications is small:
4314      (1) One or more auto-inc style memory references (aka pushes),
4315      (2) One or more addition/subtraction with the SP as destination,
4316      (3) A single move insn with the SP as destination,
4317      (4) A call_pop insn,
4318      (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
4319 
4320    Insns in the sequence that do not modify the SP are ignored,
4321    except for noreturn calls.
4322 
4323    The return value is the amount of adjustment that can be trivially
4324    verified, via immediate operand or auto-inc.  If the adjustment
4325    cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN.  */
4326 
4327 poly_int64
4328 find_args_size_adjust (rtx_insn *insn)
4329 {
4330   rtx dest, set, pat;
4331   int i;
4332 
4333   pat = PATTERN (insn);
4334   set = NULL;
4335 
4336   /* Look for a call_pop pattern.  */
4337   if (CALL_P (insn))
4338     {
4339       /* We have to allow non-call_pop patterns for the case
4340 	 of emit_single_push_insn of a TLS address.  */
4341       if (GET_CODE (pat) != PARALLEL)
4342 	return 0;
4343 
4344       /* All call_pop have a stack pointer adjust in the parallel.
4345 	 The call itself is always first, and the stack adjust is
4346 	 usually last, so search from the end.  */
4347       for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
4348 	{
4349 	  set = XVECEXP (pat, 0, i);
4350 	  if (GET_CODE (set) != SET)
4351 	    continue;
4352 	  dest = SET_DEST (set);
4353 	  if (dest == stack_pointer_rtx)
4354 	    break;
4355 	}
4356       /* We'd better have found the stack pointer adjust.  */
4357       if (i == 0)
4358 	return 0;
4359       /* Fall through to process the extracted SET and DEST
4360 	 as if it was a standalone insn.  */
4361     }
4362   else if (GET_CODE (pat) == SET)
4363     set = pat;
4364   else if ((set = single_set (insn)) != NULL)
4365     ;
4366   else if (GET_CODE (pat) == PARALLEL)
4367     {
4368       /* ??? Some older ports use a parallel with a stack adjust
4369 	 and a store for a PUSH_ROUNDING pattern, rather than a
4370 	 PRE/POST_MODIFY rtx.  Don't force them to update yet...  */
4371       /* ??? See h8300 and m68k, pushqi1.  */
4372       for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
4373 	{
4374 	  set = XVECEXP (pat, 0, i);
4375 	  if (GET_CODE (set) != SET)
4376 	    continue;
4377 	  dest = SET_DEST (set);
4378 	  if (dest == stack_pointer_rtx)
4379 	    break;
4380 
4381 	  /* We do not expect an auto-inc of the sp in the parallel.  */
4382 	  gcc_checking_assert (mem_autoinc_base (dest) != stack_pointer_rtx);
4383 	  gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4384 			       != stack_pointer_rtx);
4385 	}
4386       if (i < 0)
4387 	return 0;
4388     }
4389   else
4390     return 0;
4391 
4392   dest = SET_DEST (set);
4393 
4394   /* Look for direct modifications of the stack pointer.  */
4395   if (REG_P (dest) && REGNO (dest) == STACK_POINTER_REGNUM)
4396     {
4397       /* Look for a trivial adjustment, otherwise assume nothing.  */
4398       /* Note that the SPU restore_stack_block pattern refers to
4399 	 the stack pointer in V4SImode.  Consider that non-trivial.  */
4400       poly_int64 offset;
4401       if (SCALAR_INT_MODE_P (GET_MODE (dest))
4402 	  && strip_offset (SET_SRC (set), &offset) == stack_pointer_rtx)
4403 	return offset;
4404       /* ??? Reload can generate no-op moves, which will be cleaned
4405 	 up later.  Recognize it and continue searching.  */
4406       else if (rtx_equal_p (dest, SET_SRC (set)))
4407 	return 0;
4408       else
4409 	return HOST_WIDE_INT_MIN;
4410     }
4411   else
4412     {
4413       rtx mem, addr;
4414 
4415       /* Otherwise only think about autoinc patterns.  */
4416       if (mem_autoinc_base (dest) == stack_pointer_rtx)
4417 	{
4418 	  mem = dest;
4419 	  gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4420 			       != stack_pointer_rtx);
4421 	}
4422       else if (mem_autoinc_base (SET_SRC (set)) == stack_pointer_rtx)
4423 	mem = SET_SRC (set);
4424       else
4425 	return 0;
4426 
4427       addr = XEXP (mem, 0);
4428       switch (GET_CODE (addr))
4429 	{
4430 	case PRE_INC:
4431 	case POST_INC:
4432 	  return GET_MODE_SIZE (GET_MODE (mem));
4433 	case PRE_DEC:
4434 	case POST_DEC:
4435 	  return -GET_MODE_SIZE (GET_MODE (mem));
4436 	case PRE_MODIFY:
4437 	case POST_MODIFY:
4438 	  addr = XEXP (addr, 1);
4439 	  gcc_assert (GET_CODE (addr) == PLUS);
4440 	  gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
4441 	  return rtx_to_poly_int64 (XEXP (addr, 1));
4442 	default:
4443 	  gcc_unreachable ();
4444 	}
4445     }
4446 }
4447 
4448 poly_int64
4449 fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
4450 		       poly_int64 end_args_size)
4451 {
4452   poly_int64 args_size = end_args_size;
4453   bool saw_unknown = false;
4454   rtx_insn *insn;
4455 
4456   for (insn = last; insn != prev; insn = PREV_INSN (insn))
4457     {
4458       if (!NONDEBUG_INSN_P (insn))
4459 	continue;
4460 
4461       /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
4462 	 a call argument containing a TLS address that itself requires
4463 	 a call to __tls_get_addr.  The handling of stack_pointer_delta
4464 	 in emit_single_push_insn is supposed to ensure that any such
4465 	 notes are already correct.  */
4466       rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
4467       gcc_assert (!note || known_eq (args_size, get_args_size (note)));
4468 
4469       poly_int64 this_delta = find_args_size_adjust (insn);
4470       if (known_eq (this_delta, 0))
4471 	{
4472 	  if (!CALL_P (insn)
4473 	      || ACCUMULATE_OUTGOING_ARGS
4474 	      || find_reg_note (insn, REG_NORETURN, NULL_RTX) == NULL_RTX)
4475 	    continue;
4476 	}
4477 
4478       gcc_assert (!saw_unknown);
4479       if (known_eq (this_delta, HOST_WIDE_INT_MIN))
4480 	saw_unknown = true;
4481 
4482       if (!note)
4483 	add_args_size_note (insn, args_size);
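      /* find_args_size_adjust reports the change to the stack pointer;
	 on a downward-growing stack a push decreases the SP while the
	 outgoing-argument size increases, so flip the sign here.  */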
4484       if (STACK_GROWS_DOWNWARD)
4485 	this_delta = -poly_uint64 (this_delta);
4486 
4487       if (saw_unknown)
4488 	args_size = HOST_WIDE_INT_MIN;
4489       else
4490 	args_size -= this_delta;
4491     }
4492 
4493   return args_size;
4494 }
4495 
4496 #ifdef PUSH_ROUNDING
4497 /* Emit single push insn.  */
4498 
4499 static void
4500 emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
4501 {
4502   rtx dest_addr;
4503   poly_int64 rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
4504   rtx dest;
4505   enum insn_code icode;
4506 
4507   /* If there is a push pattern, use it.  Otherwise fall back to handing
4508      a MEM that represents the push operation to the move expander.  */
4509   icode = optab_handler (push_optab, mode);
4510   if (icode != CODE_FOR_nothing)
4511     {
4512       class expand_operand ops[1];
4513 
4514       create_input_operand (&ops[0], x, mode);
4515       if (maybe_expand_insn (icode, 1, ops))
4516 	return;
4517     }
4518   if (known_eq (GET_MODE_SIZE (mode), rounded_size))
4519     dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
4520   /* If we are to pad downward, adjust the stack pointer first and
4521      then store X into the stack location using an offset.  This is
4522      because emit_move_insn does not know how to pad; it does not have
4523      access to type.  */
4524   else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
4525     {
4526       emit_move_insn (stack_pointer_rtx,
4527 		      expand_binop (Pmode,
4528 				    STACK_GROWS_DOWNWARD ? sub_optab
4529 				    : add_optab,
4530 				    stack_pointer_rtx,
4531 				    gen_int_mode (rounded_size, Pmode),
4532 				    NULL_RTX, 0, OPTAB_LIB_WIDEN));
4533 
4534       poly_int64 offset = rounded_size - GET_MODE_SIZE (mode);
4535       if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
4536 	/* We have already decremented the stack pointer, so get the
4537 	   previous value.  */
4538 	offset += rounded_size;
4539 
4540       if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
4541 	/* We have already incremented the stack pointer, so get the
4542 	   previous value.  */
4543 	offset -= rounded_size;
4544 
4545       dest_addr = plus_constant (Pmode, stack_pointer_rtx, offset);
4546     }
4547   else
4548     {
4549       if (STACK_GROWS_DOWNWARD)
4550 	/* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC.  */
4551 	dest_addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
4552       else
4553 	/* ??? This seems wrong if STACK_PUSH_CODE == POST_INC.  */
4554 	dest_addr = plus_constant (Pmode, stack_pointer_rtx, rounded_size);
4555 
4556       dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
4557     }
4558 
4559   dest = gen_rtx_MEM (mode, dest_addr);
4560 
4561   if (type != 0)
4562     {
4563       set_mem_attributes (dest, type, 1);
4564 
4565       if (cfun->tail_call_marked)
4566 	/* Function incoming arguments may overlap with sibling call
4567 	   outgoing arguments and we cannot allow reordering of reads
4568 	   from function arguments with stores to outgoing arguments
4569 	   of sibling calls.  */
4570 	set_mem_alias_set (dest, 0);
4571     }
4572   emit_move_insn (dest, x);
4573 }
4574 
4575 /* Emit and annotate a single push insn.  */
4576 
4577 static void
4578 emit_single_push_insn (machine_mode mode, rtx x, tree type)
4579 {
4580   poly_int64 delta, old_delta = stack_pointer_delta;
4581   rtx_insn *prev = get_last_insn ();
4582   rtx_insn *last;
4583 
4584   emit_single_push_insn_1 (mode, x, type);
4585 
4586   /* Adjust stack_pointer_delta to describe the situation after the push
4587      we just performed.  Note that we must do this after the push rather
4588      than before the push in case calculating X needs pushes and pops of
4589      its own (e.g. if calling __tls_get_addr).  The REG_ARGS_SIZE notes
4590      for such pushes and pops must not include the effect of the future
4591      push of X.  */
4592   stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
4593 
4594   last = get_last_insn ();
4595 
4596   /* Notice the common case where we emitted exactly one insn.  */
4597   if (PREV_INSN (last) == prev)
4598     {
4599       add_args_size_note (last, stack_pointer_delta);
4600       return;
4601     }
4602 
4603   delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
4604   gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
4605 	      || known_eq (delta, old_delta));
4606 }
4607 #endif
4608 
4609 /* If reading SIZE bytes from X will end up reading from
4610    Y return the number of bytes that overlap.  Return -1
4611    Y, return the number of bytes that overlap.  Return -1
4612    if there is no overlap, or -2 if the overlap cannot be determined
4613 
4614 static int
4615 memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
4616 {
4617   rtx tmp = plus_constant (Pmode, x, size);
4618   rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);
4619 
4620   if (!CONST_INT_P (sub))
4621     return -2;
4622 
4623   HOST_WIDE_INT val = INTVAL (sub);
4624 
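  /* VAL is (X + SIZE) - Y: the number of bytes of the read that lie at or
     above Y.  A value outside [1, SIZE] means the read never covers Y.  */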
4625   return IN_RANGE (val, 1, size) ? val : -1;
4626 }
4627 
4628 /* Generate code to push X onto the stack, assuming it has mode MODE and
4629    type TYPE.
4630    MODE is redundant except when X is a CONST_INT (since they don't
4631    carry mode info).
4632    SIZE is an rtx for the size of data to be copied (in bytes),
4633    needed only if X is BLKmode.
4634    Return true if successful.  May return false if asked to push a
4635    partial argument during a sibcall optimization (as specified by
4636    SIBCALL_P) and the incoming and outgoing pointers cannot be shown
4637    to not overlap.
4638 
4639    ALIGN (in bits) is maximum alignment we can assume.
4640 
4641    If PARTIAL and REG are both nonzero, then copy that many of the first
4642    bytes of X into registers starting with REG, and push the rest of X.
4643    The amount of space pushed is decreased by PARTIAL bytes.
4644    REG must be a hard register in this case.
4645    If REG is zero but PARTIAL is not, take all other actions for an
4646    argument partially in registers, but do not actually load any
4647    registers.
4648 
4649    EXTRA is the amount in bytes of extra space to leave next to this arg.
4650    This is ignored if an argument block has already been allocated.
4651 
4652    On a machine that lacks real push insns, ARGS_ADDR is the address of
4653    the bottom of the argument block for this call.  We use indexing off there
4654    to store the arg.  On machines with push insns, ARGS_ADDR is 0 when an
4655    argument block has not been preallocated.
4656 
4657    ARGS_SO_FAR is the size of args previously pushed for this call.
4658 
4659    REG_PARM_STACK_SPACE is nonzero if functions require stack space
4660    for arguments passed in registers.  If nonzero, it will be the number
4661    of bytes required.  */
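/* A hypothetical call, not copied from a real caller: pushing a 32-bit
   scalar X entirely on the stack (assuming int is SImode), with no
   partial-register part and no preallocated argument block, on a target
   with real push insns, could look like

     emit_push_insn (x, SImode, integer_type_node, NULL_RTX,
		     PARM_BOUNDARY, 0, NULL_RTX, 0, NULL_RTX,
		     const0_rtx, 0, NULL_RTX, false);

   SIZE is only needed for BLKmode, and with PARTIAL and REG zero the call
   reduces to a single push insn and returns true.  */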
4662 
4663 bool
4664 emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
4665 		unsigned int align, int partial, rtx reg, poly_int64 extra,
4666 		rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
4667 		rtx alignment_pad, bool sibcall_p)
4668 {
4669   rtx xinner;
4670   pad_direction stack_direction
4671     = STACK_GROWS_DOWNWARD ? PAD_DOWNWARD : PAD_UPWARD;
4672 
4673   /* Decide where to pad the argument: PAD_DOWNWARD for below,
4674      PAD_UPWARD for above, or PAD_NONE for don't pad it.
4675      Default is below for small data on big-endian machines; else above.  */
4676   pad_direction where_pad = targetm.calls.function_arg_padding (mode, type);
4677 
4678   /* Invert direction if stack is post-decrement.
4679      FIXME: why?  */
4680   if (STACK_PUSH_CODE == POST_DEC)
4681     if (where_pad != PAD_NONE)
4682       where_pad = (where_pad == PAD_DOWNWARD ? PAD_UPWARD : PAD_DOWNWARD);
4683 
4684   xinner = x;
4685 
4686   int nregs = partial / UNITS_PER_WORD;
4687   rtx *tmp_regs = NULL;
4688   int overlapping = 0;
4689 
4690   if (mode == BLKmode
4691       || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)
4692 	  && type != NULL_TREE))
4693     {
4694       /* Copy a block into the stack, entirely or partially.  */
4695 
4696       rtx temp;
4697       int used;
4698       int offset;
4699       int skip;
4700 
4701       offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4702       used = partial - offset;
4703 
4704       if (mode != BLKmode)
4705 	{
4706 	  /* A value is to be stored in an insufficiently aligned
4707 	     stack slot; copy via a suitably aligned slot if
4708 	     necessary.  */
4709 	  size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
4710 	  if (!MEM_P (xinner))
4711 	    {
4712 	      temp = assign_temp (type, 1, 1);
4713 	      emit_move_insn (temp, xinner);
4714 	      xinner = temp;
4715 	    }
4716 	}
4717 
4718       gcc_assert (size);
4719 
4720       /* USED is now the # of bytes we need not copy to the stack
4721 	 because registers will take care of them.  */
4722 
4723       if (partial != 0)
4724 	xinner = adjust_address (xinner, BLKmode, used);
4725 
4726       /* If the partial register-part of the arg counts in its stack size,
4727 	 skip the part of stack space corresponding to the registers.
4728 	 Otherwise, start copying to the beginning of the stack space,
4729 	 by setting SKIP to 0.  */
4730       skip = (reg_parm_stack_space == 0) ? 0 : used;
4731 
4732 #ifdef PUSH_ROUNDING
4733       /* NB: Let the backend know the number of bytes to push and
4734 	 decide if push insns should be generated.  */
4735       unsigned int push_size;
4736       if (CONST_INT_P (size))
4737 	push_size = INTVAL (size);
4738       else
4739 	push_size = 0;
4740 
4741       /* Do it with several push insns if that doesn't take lots of insns
4742 	 and if there is no difficulty with push insns that skip bytes
4743 	 on the stack for alignment purposes.  */
4744       if (args_addr == 0
4745 	  && targetm.calls.push_argument (push_size)
4746 	  && CONST_INT_P (size)
4747 	  && skip == 0
4748 	  && MEM_ALIGN (xinner) >= align
4749 	  && can_move_by_pieces ((unsigned) INTVAL (size) - used, align)
4750 	  /* Here we avoid the case of a structure whose weak alignment
4751 	     forces many pushes of a small amount of data,
4752 	     and such small pushes do rounding that causes trouble.  */
4753 	  && ((!targetm.slow_unaligned_access (word_mode, align))
4754 	      || align >= BIGGEST_ALIGNMENT
4755 	      || known_eq (PUSH_ROUNDING (align / BITS_PER_UNIT),
4756 			   align / BITS_PER_UNIT))
4757 	  && known_eq (PUSH_ROUNDING (INTVAL (size)), INTVAL (size)))
4758 	{
4759 	  /* Push padding now if padding above and stack grows down,
4760 	     or if padding below and stack grows up.
4761 	     But if space already allocated, this has already been done.  */
4762 	  if (maybe_ne (extra, 0)
4763 	      && args_addr == 0
4764 	      && where_pad != PAD_NONE
4765 	      && where_pad != stack_direction)
4766 	    anti_adjust_stack (gen_int_mode (extra, Pmode));
4767 
4768 	  move_by_pieces (NULL, xinner, INTVAL (size) - used, align,
4769 			  RETURN_BEGIN);
4770 	}
4771       else
4772 #endif /* PUSH_ROUNDING  */
4773 	{
4774 	  rtx target;
4775 
4776 	  /* Otherwise make space on the stack and copy the data
4777 	     to the address of that space.  */
4778 
4779 	  /* Deduct words put into registers from the size we must copy.  */
4780 	  if (partial != 0)
4781 	    {
4782 	      if (CONST_INT_P (size))
4783 		size = GEN_INT (INTVAL (size) - used);
4784 	      else
4785 		size = expand_binop (GET_MODE (size), sub_optab, size,
4786 				     gen_int_mode (used, GET_MODE (size)),
4787 				     NULL_RTX, 0, OPTAB_LIB_WIDEN);
4788 	    }
4789 
4790 	  /* Get the address of the stack space.
4791 	     In this case, we do not deal with EXTRA separately.
4792 	     A single stack adjust will do.  */
4793 	  poly_int64 const_args_so_far;
4794 	  if (! args_addr)
4795 	    {
4796 	      temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
4797 	      extra = 0;
4798 	    }
4799 	  else if (poly_int_rtx_p (args_so_far, &const_args_so_far))
4800 	    temp = memory_address (BLKmode,
4801 				   plus_constant (Pmode, args_addr,
4802 						  skip + const_args_so_far));
4803 	  else
4804 	    temp = memory_address (BLKmode,
4805 				   plus_constant (Pmode,
4806 						  gen_rtx_PLUS (Pmode,
4807 								args_addr,
4808 								args_so_far),
4809 						  skip));
4810 
4811 	  if (!ACCUMULATE_OUTGOING_ARGS)
4812 	    {
4813 	      /* If the source is referenced relative to the stack pointer,
4814 		 copy it to another register to stabilize it.  We do not need
4815 		 to do this if we know that we won't be changing sp.  */
4816 
4817 	      if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
4818 		  || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
4819 		temp = copy_to_reg (temp);
4820 	    }
4821 
4822 	  target = gen_rtx_MEM (BLKmode, temp);
4823 
4824 	  /* We do *not* set_mem_attributes here, because incoming arguments
4825 	     may overlap with sibling call outgoing arguments and we cannot
4826 	     allow reordering of reads from function arguments with stores
4827 	     to outgoing arguments of sibling calls.  We do, however, want
4828 	     to record the alignment of the stack slot.  */
4829 	  /* ALIGN may well be better aligned than TYPE, e.g. due to
4830 	     PARM_BOUNDARY.  Assume the caller isn't lying.  */
4831 	  set_mem_align (target, align);
4832 
4833 	  /* If part should go in registers and pushing to that part would
4834 	     overwrite some of the values that need to go into regs, load the
4835 	     overlapping values into temporary pseudos to be moved into the hard
4836 	     regs at the end after the stack pushing has completed.
4837 	     We cannot load them directly into the hard regs here because
4838 	     they can be clobbered by the block move expansions.
4839 	     See PR 65358.  */
4840 
4841 	  if (partial > 0 && reg != 0 && mode == BLKmode
4842 	      && GET_CODE (reg) != PARALLEL)
4843 	    {
4844 	      overlapping = memory_load_overlap (XEXP (x, 0), temp, partial);
4845 	      if (overlapping > 0)
4846 	        {
4847 		  gcc_assert (overlapping % UNITS_PER_WORD == 0);
4848 		  overlapping /= UNITS_PER_WORD;
4849 
4850 		  tmp_regs = XALLOCAVEC (rtx, overlapping);
4851 
4852 		  for (int i = 0; i < overlapping; i++)
4853 		    tmp_regs[i] = gen_reg_rtx (word_mode);
4854 
4855 		  for (int i = 0; i < overlapping; i++)
4856 		    emit_move_insn (tmp_regs[i],
4857 				    operand_subword_force (target, i, mode));
4858 	        }
4859 	      else if (overlapping == -1)
4860 		overlapping = 0;
4861 	      /* Could not determine whether there is overlap.
4862 	         Fail the sibcall.  */
4863 	      else
4864 		{
4865 		  overlapping = 0;
4866 		  if (sibcall_p)
4867 		    return false;
4868 		}
4869 	    }
4870 	  emit_block_move (target, xinner, size, BLOCK_OP_CALL_PARM);
4871 	}
4872     }
4873   else if (partial > 0)
4874     {
4875       /* Scalar partly in registers.  This case is only supported
4876 	 for fixed-width modes.  */
4877       int num_words = GET_MODE_SIZE (mode).to_constant ();
4878       num_words /= UNITS_PER_WORD;
4879       int i;
4880       int not_stack;
4881       /* # bytes of start of argument
4882 	 that we must make space for but need not store.  */
4883       int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4884       int args_offset = INTVAL (args_so_far);
4885       int skip;
4886 
4887       /* Push padding now if padding above and stack grows down,
4888 	 or if padding below and stack grows up.
4889 	 But if space already allocated, this has already been done.  */
4890       if (maybe_ne (extra, 0)
4891 	  && args_addr == 0
4892 	  && where_pad != PAD_NONE
4893 	  && where_pad != stack_direction)
4894 	anti_adjust_stack (gen_int_mode (extra, Pmode));
4895 
4896       /* If we make space by pushing it, we might as well push
4897 	 the real data.  Otherwise, we can leave OFFSET nonzero
4898 	 and leave the space uninitialized.  */
4899       if (args_addr == 0)
4900 	offset = 0;
4901 
4902       /* Now NOT_STACK gets the number of words that we don't need to
4903 	 allocate on the stack.  Convert OFFSET to words too.  */
4904       not_stack = (partial - offset) / UNITS_PER_WORD;
4905       offset /= UNITS_PER_WORD;
4906 
4907       /* If the partial register-part of the arg counts in its stack size,
4908 	 skip the part of stack space corresponding to the registers.
4909 	 Otherwise, start copying to the beginning of the stack space,
4910 	 by setting SKIP to 0.  */
4911       skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
4912 
4913       if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
4914 	x = validize_mem (force_const_mem (mode, x));
4915 
4916       /* If X is a hard register in a non-integer mode, copy it into a pseudo;
4917 	 SUBREGs of such registers are not allowed.  */
4918       if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4919 	   && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
4920 	x = copy_to_reg (x);
4921 
4922       /* Loop over all the words allocated on the stack for this arg.  */
4923       /* We can do it by words, because any scalar bigger than a word
4924 	 has a size a multiple of a word.  */
4925       for (i = num_words - 1; i >= not_stack; i--)
4926 	if (i >= not_stack + offset)
4927 	  if (!emit_push_insn (operand_subword_force (x, i, mode),
4928 			  word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
4929 			  0, args_addr,
4930 			  GEN_INT (args_offset + ((i - not_stack + skip)
4931 						  * UNITS_PER_WORD)),
4932 			  reg_parm_stack_space, alignment_pad, sibcall_p))
4933 	    return false;
4934     }
4935   else
4936     {
4937       rtx addr;
4938       rtx dest;
4939 
4940       /* Push padding now if padding above and stack grows down,
4941 	 or if padding below and stack grows up.
4942 	 But if space already allocated, this has already been done.  */
4943       if (maybe_ne (extra, 0)
4944 	  && args_addr == 0
4945 	  && where_pad != PAD_NONE
4946 	  && where_pad != stack_direction)
4947 	anti_adjust_stack (gen_int_mode (extra, Pmode));
4948 
4949 #ifdef PUSH_ROUNDING
4950       if (args_addr == 0 && targetm.calls.push_argument (0))
4951 	emit_single_push_insn (mode, x, type);
4952       else
4953 #endif
4954 	{
4955 	  addr = simplify_gen_binary (PLUS, Pmode, args_addr, args_so_far);
4956 	  dest = gen_rtx_MEM (mode, memory_address (mode, addr));
4957 
4958 	  /* We do *not* set_mem_attributes here, because incoming arguments
4959 	     may overlap with sibling call outgoing arguments and we cannot
4960 	     allow reordering of reads from function arguments with stores
4961 	     to outgoing arguments of sibling calls.  We do, however, want
4962 	     to record the alignment of the stack slot.  */
4963 	  /* ALIGN may well be better aligned than TYPE, e.g. due to
4964 	     PARM_BOUNDARY.  Assume the caller isn't lying.  */
4965 	  set_mem_align (dest, align);
4966 
4967 	  emit_move_insn (dest, x);
4968 	}
4969     }
4970 
4971   /* Move the partial arguments into the registers and any overlapping
4972      values that we moved into the pseudos in tmp_regs.  */
4973   if (partial > 0 && reg != 0)
4974     {
4975       /* Handle calls that pass values in multiple non-contiguous locations.
4976 	 The Irix 6 ABI has examples of this.  */
4977       if (GET_CODE (reg) == PARALLEL)
4978 	emit_group_load (reg, x, type, -1);
4979       else
4980         {
4981 	  gcc_assert (partial % UNITS_PER_WORD == 0);
4982 	  move_block_to_reg (REGNO (reg), x, nregs - overlapping, mode);
4983 
4984 	  for (int i = 0; i < overlapping; i++)
4985 	    emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg)
4986 						    + nregs - overlapping + i),
4987 			    tmp_regs[i]);
4988 
4989 	}
4990     }
4991 
4992   if (maybe_ne (extra, 0) && args_addr == 0 && where_pad == stack_direction)
4993     anti_adjust_stack (gen_int_mode (extra, Pmode));
4994 
4995   if (alignment_pad && args_addr == 0)
4996     anti_adjust_stack (alignment_pad);
4997 
4998   return true;
4999 }
5000 
5001 /* Return X if X can be used as a subtarget in a sequence of arithmetic
5002    operations.  */
5003 
5004 static rtx
5005 get_subtarget (rtx x)
5006 {
5007   return (optimize
5008           || x == 0
5009 	   /* Only registers can be subtargets.  */
5010 	   || !REG_P (x)
5011 	   /* Don't use hard regs to avoid extending their life.  */
5012 	   || REGNO (x) < FIRST_PSEUDO_REGISTER
5013 	  ? 0 : x);
5014 }
5015 
5016 /* A subroutine of expand_assignment.  Optimize FIELD op= VAL, where
5017    FIELD is a bitfield.  Returns true if the optimization was successful,
5018    and there's nothing else to do.  */
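/* A hypothetical illustration (field layout is target dependent): for

     struct S { unsigned lo : 12; unsigned hi : 4; } s;
     s.hi += 1;

   HI sits in the topmost bits of the 16-bit unit that is accessed on a
   typical little-endian target, so the increment can be done by adding
   (1 << 12) to the whole unit: any carry out of the field falls off the
   top and no masking is needed.  Likewise, a 1-bit field plus or minus a
   constant reduces to an xor (or nothing) of its mask bit.  */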
5019 
5020 static bool
5021 optimize_bitfield_assignment_op (poly_uint64 pbitsize,
5022 				 poly_uint64 pbitpos,
5023 				 poly_uint64 pbitregion_start,
5024 				 poly_uint64 pbitregion_end,
5025 				 machine_mode mode1, rtx str_rtx,
5026 				 tree to, tree src, bool reverse)
5027 {
5028   /* str_mode is not guaranteed to be a scalar type.  */
5029   machine_mode str_mode = GET_MODE (str_rtx);
5030   unsigned int str_bitsize;
5031   tree op0, op1;
5032   rtx value, result;
5033   optab binop;
5034   gimple *srcstmt;
5035   enum tree_code code;
5036 
5037   unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
5038   if (mode1 != VOIDmode
5039       || !pbitsize.is_constant (&bitsize)
5040       || !pbitpos.is_constant (&bitpos)
5041       || !pbitregion_start.is_constant (&bitregion_start)
5042       || !pbitregion_end.is_constant (&bitregion_end)
5043       || bitsize >= BITS_PER_WORD
5044       || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
5045       || str_bitsize > BITS_PER_WORD
5046       || TREE_SIDE_EFFECTS (to)
5047       || TREE_THIS_VOLATILE (to))
5048     return false;
5049 
5050   STRIP_NOPS (src);
5051   if (TREE_CODE (src) != SSA_NAME)
5052     return false;
5053   if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
5054     return false;
5055 
5056   srcstmt = get_gimple_for_ssa_name (src);
5057   if (!srcstmt
5058       || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
5059     return false;
5060 
5061   code = gimple_assign_rhs_code (srcstmt);
5062 
5063   op0 = gimple_assign_rhs1 (srcstmt);
5064 
5065   /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
5066      to find its initialization.  Hopefully the initialization will
5067      be from a bitfield load.  */
5068   if (TREE_CODE (op0) == SSA_NAME)
5069     {
5070       gimple *op0stmt = get_gimple_for_ssa_name (op0);
5071 
5072       /* We want to eventually have OP0 be the same as TO, which
5073 	 should be a bitfield.  */
5074       if (!op0stmt
5075 	  || !is_gimple_assign (op0stmt)
5076 	  || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
5077 	return false;
5078       op0 = gimple_assign_rhs1 (op0stmt);
5079     }
5080 
5081   op1 = gimple_assign_rhs2 (srcstmt);
5082 
5083   if (!operand_equal_p (to, op0, 0))
5084     return false;
5085 
5086   if (MEM_P (str_rtx))
5087     {
5088       unsigned HOST_WIDE_INT offset1;
5089 
5090       if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
5091 	str_bitsize = BITS_PER_WORD;
5092 
5093       scalar_int_mode best_mode;
5094       if (!get_best_mode (bitsize, bitpos, bitregion_start, bitregion_end,
5095 			  MEM_ALIGN (str_rtx), str_bitsize, false, &best_mode))
5096 	return false;
5097       str_mode = best_mode;
5098       str_bitsize = GET_MODE_BITSIZE (best_mode);
5099 
5100       offset1 = bitpos;
5101       bitpos %= str_bitsize;
5102       offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
5103       str_rtx = adjust_address (str_rtx, str_mode, offset1);
5104     }
5105   else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
5106     return false;
5107 
5108   /* If the bit field covers the whole REG/MEM, store_field
5109      will likely generate better code.  */
5110   if (bitsize >= str_bitsize)
5111     return false;
5112 
5113   /* We can't handle fields split across multiple entities.  */
5114   if (bitpos + bitsize > str_bitsize)
5115     return false;
5116 
5117   if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
5118     bitpos = str_bitsize - bitpos - bitsize;
5119 
5120   switch (code)
5121     {
5122     case PLUS_EXPR:
5123     case MINUS_EXPR:
5124       /* For now, just optimize the case of the topmost bitfield
5125 	 where we don't need to do any masking and also
5126 	 1 bit bitfields where xor can be used.
5127 	 We might win by one instruction for the other bitfields
5128 	 too if insv/extv instructions aren't used, so that
5129 	 can be added later.  */
5130       if ((reverse || bitpos + bitsize != str_bitsize)
5131 	  && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
5132 	break;
5133 
5134       value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
5135       value = convert_modes (str_mode,
5136 			     TYPE_MODE (TREE_TYPE (op1)), value,
5137 			     TYPE_UNSIGNED (TREE_TYPE (op1)));
5138 
5139       /* We may be accessing data outside the field, which means
5140 	 we can alias adjacent data.  */
5141       if (MEM_P (str_rtx))
5142 	{
5143 	  str_rtx = shallow_copy_rtx (str_rtx);
5144 	  set_mem_alias_set (str_rtx, 0);
5145 	  set_mem_expr (str_rtx, 0);
5146 	}
5147 
5148       if (bitsize == 1 && (reverse || bitpos + bitsize != str_bitsize))
5149 	{
5150 	  value = expand_and (str_mode, value, const1_rtx, NULL);
5151 	  binop = xor_optab;
5152 	}
5153       else
5154 	binop = code == PLUS_EXPR ? add_optab : sub_optab;
5155 
5156       value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
5157       if (reverse)
5158 	value = flip_storage_order (str_mode, value);
5159       result = expand_binop (str_mode, binop, str_rtx,
5160 			     value, str_rtx, 1, OPTAB_WIDEN);
5161       if (result != str_rtx)
5162 	emit_move_insn (str_rtx, result);
5163       return true;
5164 
5165     case BIT_IOR_EXPR:
5166     case BIT_XOR_EXPR:
5167       if (TREE_CODE (op1) != INTEGER_CST)
5168 	break;
5169       value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
5170       value = convert_modes (str_mode,
5171 			     TYPE_MODE (TREE_TYPE (op1)), value,
5172 			     TYPE_UNSIGNED (TREE_TYPE (op1)));
5173 
5174       /* We may be accessing data outside the field, which means
5175 	 we can alias adjacent data.  */
5176       if (MEM_P (str_rtx))
5177 	{
5178 	  str_rtx = shallow_copy_rtx (str_rtx);
5179 	  set_mem_alias_set (str_rtx, 0);
5180 	  set_mem_expr (str_rtx, 0);
5181 	}
5182 
5183       binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
5184       if (bitpos + bitsize != str_bitsize)
5185 	{
5186 	  rtx mask = gen_int_mode ((HOST_WIDE_INT_1U << bitsize) - 1,
5187 				   str_mode);
5188 	  value = expand_and (str_mode, value, mask, NULL_RTX);
5189 	}
5190       value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
5191       if (reverse)
5192 	value = flip_storage_order (str_mode, value);
5193       result = expand_binop (str_mode, binop, str_rtx,
5194 			     value, str_rtx, 1, OPTAB_WIDEN);
5195       if (result != str_rtx)
5196 	emit_move_insn (str_rtx, result);
5197       return true;
5198 
5199     default:
5200       break;
5201     }
5202 
5203   return false;
5204 }
5205 
5206 /* In the C++ memory model, consecutive bit fields in a structure are
5207    considered one memory location.
5208 
5209    Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
5210    returns the bit range of consecutive bits in which this COMPONENT_REF
5211    belongs.  The values are returned in *BITSTART and *BITEND.  *BITPOS
5212    and *OFFSET may be adjusted in the process.
5213 
5214    If the access does not need to be restricted, 0 is returned in both
5215    *BITSTART and *BITEND.  */
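/* A hypothetical illustration (exact layout depends on the target): for

     struct S { char c; int f1 : 3; int f2 : 11; } s;
     s.f2 = v;

   F1 and F2 typically share one DECL_BIT_FIELD_REPRESENTATIVE that starts
   after C, so the range returned here spans just that shared unit.  The
   store to F2 may then read and rewrite F1's bits, but it must never touch
   C, which the C++ memory model treats as a separate memory location.  */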
5216 
5217 void
5218 get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
5219 	       poly_int64_pod *bitpos, tree *offset)
5220 {
5221   poly_int64 bitoffset;
5222   tree field, repr;
5223 
5224   gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
5225 
5226   field = TREE_OPERAND (exp, 1);
5227   repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
5228   /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
5229      need to limit the range we can access.  */
5230   if (!repr)
5231     {
5232       *bitstart = *bitend = 0;
5233       return;
5234     }
5235 
5236   /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
5237      part of a larger bit field, then the representative does not serve any
5238      useful purpose.  This can occur in Ada.  */
5239   if (handled_component_p (TREE_OPERAND (exp, 0)))
5240     {
5241       machine_mode rmode;
5242       poly_int64 rbitsize, rbitpos;
5243       tree roffset;
5244       int unsignedp, reversep, volatilep = 0;
5245       get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
5246 			   &roffset, &rmode, &unsignedp, &reversep,
5247 			   &volatilep);
5248       if (!multiple_p (rbitpos, BITS_PER_UNIT))
5249 	{
5250 	  *bitstart = *bitend = 0;
5251 	  return;
5252 	}
5253     }
5254 
5255   /* Compute the adjustment to bitpos from the offset of the field
5256      relative to the representative.  DECL_FIELD_OFFSET of field and
5257      repr are the same by construction if they are not constants,
5258      see finish_bitfield_layout.  */
5259   poly_uint64 field_offset, repr_offset;
5260   if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
5261       && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
5262     bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
5263   else
5264     bitoffset = 0;
5265   bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
5266 		- tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
5267 
5268   /* If the adjustment is larger than bitpos, we would have a negative bit
5269      position for the lower bound and this may wreak havoc later.  Adjust
5270      offset and bitpos to make the lower bound non-negative in that case.  */
5271   if (maybe_gt (bitoffset, *bitpos))
5272     {
5273       poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
5274       poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
5275 
5276       *bitpos += adjust_bits;
5277       if (*offset == NULL_TREE)
5278 	*offset = size_int (-adjust_bytes);
5279       else
5280 	*offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
5281       *bitstart = 0;
5282     }
5283   else
5284     *bitstart = *bitpos - bitoffset;
5285 
5286   *bitend = *bitstart + tree_to_poly_uint64 (DECL_SIZE (repr)) - 1;
5287 }
5288 
5289 /* Returns true if BASE is a DECL that does not reside in memory and
5290    has non-BLKmode.  DECL_RTL must not be a MEM; if
5291    DECL_RTL was not set yet, return false.  */
5292 
5293 bool
5294 non_mem_decl_p (tree base)
5295 {
5296   if (!DECL_P (base)
5297       || TREE_ADDRESSABLE (base)
5298       || DECL_MODE (base) == BLKmode)
5299     return false;
5300 
5301   if (!DECL_RTL_SET_P (base))
5302     return false;
5303 
5304   return (!MEM_P (DECL_RTL (base)));
5305 }
5306 
5307 /* Returns true if REF refers to an object that does not
5308    reside in memory and has non-BLKmode.  */
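/* A hypothetical example: for a gimple store of the form
   MEM[(int *)&x] = 1, where X is a non-addressable local whose DECL_RTL is
   a pseudo register, this returns true, and expand_assignment then expands
   the store as an assignment to that register instead of going through
   memory.  */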
5309 
5310 bool
5311 mem_ref_refers_to_non_mem_p (tree ref)
5312 {
5313   tree base;
5314 
5315   if (TREE_CODE (ref) == MEM_REF
5316       || TREE_CODE (ref) == TARGET_MEM_REF)
5317     {
5318       tree addr = TREE_OPERAND (ref, 0);
5319 
5320       if (TREE_CODE (addr) != ADDR_EXPR)
5321 	return false;
5322 
5323       base = TREE_OPERAND (addr, 0);
5324     }
5325   else
5326     base = ref;
5327 
5328   return non_mem_decl_p (base);
5329 }
5330 
5331 /* Expand an assignment that stores the value of FROM into TO.  If NONTEMPORAL
5332    is true, try generating a nontemporal store.  */
5333 
5334 void
5335 expand_assignment (tree to, tree from, bool nontemporal)
5336 {
5337   rtx to_rtx = 0;
5338   rtx result;
5339   machine_mode mode;
5340   unsigned int align;
5341   enum insn_code icode;
5342 
5343   /* Don't crash if the lhs of the assignment was erroneous.  */
5344   if (TREE_CODE (to) == ERROR_MARK)
5345     {
5346       expand_normal (from);
5347       return;
5348     }
5349 
5350   /* Optimize away no-op moves without side-effects.  */
5351   if (operand_equal_p (to, from, 0))
5352     return;
5353 
5354   /* Handle misaligned stores.  */
5355   mode = TYPE_MODE (TREE_TYPE (to));
5356   if ((TREE_CODE (to) == MEM_REF
5357        || TREE_CODE (to) == TARGET_MEM_REF
5358        || DECL_P (to))
5359       && mode != BLKmode
5360       && !mem_ref_refers_to_non_mem_p (to)
5361       && ((align = get_object_alignment (to))
5362 	  < GET_MODE_ALIGNMENT (mode))
5363       && (((icode = optab_handler (movmisalign_optab, mode))
5364 	   != CODE_FOR_nothing)
5365 	  || targetm.slow_unaligned_access (mode, align)))
5366     {
5367       rtx reg, mem;
5368 
5369       reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
5370       /* Handle PARALLEL.  */
5371       reg = maybe_emit_group_store (reg, TREE_TYPE (from));
5372       reg = force_not_mem (reg);
5373       mem = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5374       if (TREE_CODE (to) == MEM_REF && REF_REVERSE_STORAGE_ORDER (to))
5375 	reg = flip_storage_order (mode, reg);
5376 
5377       if (icode != CODE_FOR_nothing)
5378 	{
5379 	  class expand_operand ops[2];
5380 
5381 	  create_fixed_operand (&ops[0], mem);
5382 	  create_input_operand (&ops[1], reg, mode);
5383 	  /* The movmisalign<mode> pattern cannot fail, else the assignment
5384 	     would silently be omitted.  */
5385 	  expand_insn (icode, 2, ops);
5386 	}
5387       else
5388 	store_bit_field (mem, GET_MODE_BITSIZE (mode), 0, 0, 0, mode, reg,
5389 			 false);
5390       return;
5391     }
5392 
5393   /* Assignment of a structure component needs special treatment
5394      if the structure component's rtx is not simply a MEM.
5395      Assignment of an array element at a constant index, and assignment of
5396      an array element in an unaligned packed structure field, has the same
5397      problem.  Same for (partially) storing into a non-memory object.  */
5398   if (handled_component_p (to)
5399       || (TREE_CODE (to) == MEM_REF
5400 	  && (REF_REVERSE_STORAGE_ORDER (to)
5401 	      || mem_ref_refers_to_non_mem_p (to)))
5402       || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
5403     {
5404       machine_mode mode1;
5405       poly_int64 bitsize, bitpos;
5406       poly_uint64 bitregion_start = 0;
5407       poly_uint64 bitregion_end = 0;
5408       tree offset;
5409       int unsignedp, reversep, volatilep = 0;
5410       tree tem;
5411 
5412       push_temp_slots ();
5413       tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
5414 				 &unsignedp, &reversep, &volatilep);
5415 
5416       /* Make sure bitpos is not negative, it can wreak havoc later.  */
5417       if (maybe_lt (bitpos, 0))
5418 	{
5419 	  gcc_assert (offset == NULL_TREE);
5420 	  offset = size_int (bits_to_bytes_round_down (bitpos));
5421 	  bitpos = num_trailing_bits (bitpos);
5422 	}
5423 
5424       if (TREE_CODE (to) == COMPONENT_REF
5425 	  && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
5426 	get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);
5427       /* The C++ memory model naturally applies to byte-aligned fields.
5428 	 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
5429 	 BITSIZE are not byte-aligned, there is no need to limit the range
5430 	 we can access.  This can occur with packed structures in Ada.  */
5431       else if (maybe_gt (bitsize, 0)
5432 	       && multiple_p (bitsize, BITS_PER_UNIT)
5433 	       && multiple_p (bitpos, BITS_PER_UNIT))
5434 	{
5435 	  bitregion_start = bitpos;
5436 	  bitregion_end = bitpos + bitsize - 1;
5437 	}
5438 
5439       to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
5440 
5441       /* If the field has a mode, we want to access it in the
5442 	 field's mode, not the computed mode.
5443 	 If a MEM has VOIDmode (external with incomplete type),
5444 	 use BLKmode for it instead.  */
5445       if (MEM_P (to_rtx))
5446 	{
5447 	  if (mode1 != VOIDmode)
5448 	    to_rtx = adjust_address (to_rtx, mode1, 0);
5449 	  else if (GET_MODE (to_rtx) == VOIDmode)
5450 	    to_rtx = adjust_address (to_rtx, BLKmode, 0);
5451 	}
5452 
5453       if (offset != 0)
5454 	{
5455 	  machine_mode address_mode;
5456 	  rtx offset_rtx;
5457 
5458 	  if (!MEM_P (to_rtx))
5459 	    {
5460 	      /* We can get constant negative offsets into arrays with broken
5461 		 user code.  Translate this to a trap instead of ICEing.  */
5462 	      gcc_assert (TREE_CODE (offset) == INTEGER_CST);
5463 	      expand_builtin_trap ();
5464 	      to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
5465 	    }
5466 
5467 	  offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
5468 	  address_mode = get_address_mode (to_rtx);
5469 	  if (GET_MODE (offset_rtx) != address_mode)
5470 	    {
5471 		/* We cannot be sure that the RTL in offset_rtx is valid outside
5472 		   of a memory address context, so force it into a register
5473 		   before attempting to convert it to the desired mode.  */
5474 	      offset_rtx = force_operand (offset_rtx, NULL_RTX);
5475 	      offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
5476 	    }
5477 
5478 	  /* If we have an expression in OFFSET_RTX and a non-zero
5479 	     byte offset in BITPOS, adding the byte offset before the
5480 	     OFFSET_RTX results in better intermediate code, which makes
5481 	     later rtl optimization passes perform better.
5482 
5483 	     We prefer intermediate code like this:
5484 
5485 	     r124:DI=r123:DI+0x18
5486 	     [r124:DI]=r121:DI
5487 
5488 	     ... instead of ...
5489 
5490 	     r124:DI=r123:DI+0x10
5491 	     [r124:DI+0x8]=r121:DI
5492 
5493 	     This is only done for aligned data values, as these can
5494 	     be expected to result in single move instructions.  */
5495 	  poly_int64 bytepos;
5496 	  if (mode1 != VOIDmode
5497 	      && maybe_ne (bitpos, 0)
5498 	      && maybe_gt (bitsize, 0)
5499 	      && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
5500 	      && multiple_p (bitpos, bitsize)
5501 	      && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
5502 	      && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
5503 	    {
5504 	      to_rtx = adjust_address (to_rtx, mode1, bytepos);
5505 	      bitregion_start = 0;
5506 	      if (known_ge (bitregion_end, poly_uint64 (bitpos)))
5507 		bitregion_end -= bitpos;
5508 	      bitpos = 0;
5509 	    }
5510 
5511 	  to_rtx = offset_address (to_rtx, offset_rtx,
5512 				   highest_pow2_factor_for_target (to,
5513 				   				   offset));
5514 	}
5515 
5516       /* No action is needed if the target is not a memory and the field
5517 	 lies completely outside that target.  This can occur if the source
5518 	 code contains an out-of-bounds access to a small array.  */
5519       if (!MEM_P (to_rtx)
5520 	  && GET_MODE (to_rtx) != BLKmode
5521 	  && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
5522 	{
5523 	  expand_normal (from);
5524 	  result = NULL;
5525 	}
5526       /* Handle expand_expr of a complex value returning a CONCAT.  */
5527       else if (GET_CODE (to_rtx) == CONCAT)
5528 	{
5529 	  machine_mode to_mode = GET_MODE (to_rtx);
5530 	  gcc_checking_assert (COMPLEX_MODE_P (to_mode));
5531 	  poly_int64 mode_bitsize = GET_MODE_BITSIZE (to_mode);
5532 	  unsigned short inner_bitsize = GET_MODE_UNIT_BITSIZE (to_mode);
5533 	  if (TYPE_MODE (TREE_TYPE (from)) == to_mode
5534 	      && known_eq (bitpos, 0)
5535 	      && known_eq (bitsize, mode_bitsize))
5536 	    result = store_expr (from, to_rtx, false, nontemporal, reversep);
5537 	  else if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE_INNER (to_mode)
5538 		   && known_eq (bitsize, inner_bitsize)
5539 		   && (known_eq (bitpos, 0)
5540 		       || known_eq (bitpos, inner_bitsize)))
5541 	    result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
5542 				 false, nontemporal, reversep);
5543 	  else if (known_le (bitpos + bitsize, inner_bitsize))
5544 	    result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
5545 				  bitregion_start, bitregion_end,
5546 				  mode1, from, get_alias_set (to),
5547 				  nontemporal, reversep);
5548 	  else if (known_ge (bitpos, inner_bitsize))
5549 	    result = store_field (XEXP (to_rtx, 1), bitsize,
5550 				  bitpos - inner_bitsize,
5551 				  bitregion_start, bitregion_end,
5552 				  mode1, from, get_alias_set (to),
5553 				  nontemporal, reversep);
5554 	  else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
5555 	    {
5556 	      result = expand_normal (from);
5557 	      if (GET_CODE (result) == CONCAT)
5558 		{
5559 		  to_mode = GET_MODE_INNER (to_mode);
5560 		  machine_mode from_mode = GET_MODE_INNER (GET_MODE (result));
5561 		  rtx from_real
5562 		    = simplify_gen_subreg (to_mode, XEXP (result, 0),
5563 					   from_mode, 0);
5564 		  rtx from_imag
5565 		    = simplify_gen_subreg (to_mode, XEXP (result, 1),
5566 					   from_mode, 0);
5567 		  if (!from_real || !from_imag)
5568 		    goto concat_store_slow;
5569 		  emit_move_insn (XEXP (to_rtx, 0), from_real);
5570 		  emit_move_insn (XEXP (to_rtx, 1), from_imag);
5571 		}
5572 	      else
5573 		{
5574 		  machine_mode from_mode
5575 		    = GET_MODE (result) == VOIDmode
5576 		      ? TYPE_MODE (TREE_TYPE (from))
5577 		      : GET_MODE (result);
5578 		  rtx from_rtx;
5579 		  if (MEM_P (result))
5580 		    from_rtx = change_address (result, to_mode, NULL_RTX);
5581 		  else
5582 		    from_rtx
5583 		      = simplify_gen_subreg (to_mode, result, from_mode, 0);
5584 		  if (from_rtx)
5585 		    {
5586 		      emit_move_insn (XEXP (to_rtx, 0),
5587 				      read_complex_part (from_rtx, false));
5588 		      emit_move_insn (XEXP (to_rtx, 1),
5589 				      read_complex_part (from_rtx, true));
5590 		    }
5591 		  else
5592 		    {
5593 		      to_mode = GET_MODE_INNER (to_mode);
5594 		      rtx from_real
5595 			= simplify_gen_subreg (to_mode, result, from_mode, 0);
5596 		      rtx from_imag
5597 			= simplify_gen_subreg (to_mode, result, from_mode,
5598 					       GET_MODE_SIZE (to_mode));
5599 		      if (!from_real || !from_imag)
5600 			goto concat_store_slow;
5601 		      emit_move_insn (XEXP (to_rtx, 0), from_real);
5602 		      emit_move_insn (XEXP (to_rtx, 1), from_imag);
5603 		    }
5604 		}
5605 	    }
5606 	  else
5607 	    {
5608 	    concat_store_slow:;
5609 	      rtx temp = assign_stack_temp (GET_MODE (to_rtx),
5610 					    GET_MODE_SIZE (GET_MODE (to_rtx)));
5611 	      write_complex_part (temp, XEXP (to_rtx, 0), false);
5612 	      write_complex_part (temp, XEXP (to_rtx, 1), true);
5613 	      result = store_field (temp, bitsize, bitpos,
5614 				    bitregion_start, bitregion_end,
5615 				    mode1, from, get_alias_set (to),
5616 				    nontemporal, reversep);
5617 	      emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
5618 	      emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
5619 	    }
5620 	}
5621       /* For calls to functions returning variable length structures, if TO_RTX
5622 	 is not a MEM, go through a MEM because we must not create temporaries
5623 	 of the VLA type.  */
5624       else if (!MEM_P (to_rtx)
5625 	       && TREE_CODE (from) == CALL_EXPR
5626 	       && COMPLETE_TYPE_P (TREE_TYPE (from))
5627 	       && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) != INTEGER_CST)
5628 	{
5629 	  rtx temp = assign_stack_temp (GET_MODE (to_rtx),
5630 					GET_MODE_SIZE (GET_MODE (to_rtx)));
5631 	  result = store_field (temp, bitsize, bitpos, bitregion_start,
5632 				bitregion_end, mode1, from, get_alias_set (to),
5633 				nontemporal, reversep);
5634 	  emit_move_insn (to_rtx, temp);
5635 	}
5636       else
5637 	{
5638 	  if (MEM_P (to_rtx))
5639 	    {
5640 	      /* If the field is at offset zero, we could have been given the
5641 		 DECL_RTX of the parent struct.  Don't munge it.  */
5642 	      to_rtx = shallow_copy_rtx (to_rtx);
5643 	      set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
5644 	      if (volatilep)
5645 		MEM_VOLATILE_P (to_rtx) = 1;
5646 	    }
5647 
5648 	  gcc_checking_assert (known_ge (bitpos, 0));
5649 	  if (optimize_bitfield_assignment_op (bitsize, bitpos,
5650 					       bitregion_start, bitregion_end,
5651 					       mode1, to_rtx, to, from,
5652 					       reversep))
5653 	    result = NULL;
5654 	  else if (SUBREG_P (to_rtx)
5655 		   && SUBREG_PROMOTED_VAR_P (to_rtx))
5656 	    {
5657 	      /* If to_rtx is a promoted subreg, we need to zero or sign
5658 		 extend the value afterwards.  */
5659 	      if (TREE_CODE (to) == MEM_REF
5660 		  && TYPE_MODE (TREE_TYPE (from)) != BLKmode
5661 		  && !REF_REVERSE_STORAGE_ORDER (to)
5662 		  && known_eq (bitpos, 0)
5663 		  && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (to_rtx))))
5664 		result = store_expr (from, to_rtx, 0, nontemporal, false);
5665 	      else
5666 		{
5667 		  rtx to_rtx1
5668 		    = lowpart_subreg (subreg_unpromoted_mode (to_rtx),
5669 				      SUBREG_REG (to_rtx),
5670 				      subreg_promoted_mode (to_rtx));
5671 		  result = store_field (to_rtx1, bitsize, bitpos,
5672 					bitregion_start, bitregion_end,
5673 					mode1, from, get_alias_set (to),
5674 					nontemporal, reversep);
5675 		  convert_move (SUBREG_REG (to_rtx), to_rtx1,
5676 				SUBREG_PROMOTED_SIGN (to_rtx));
5677 		}
5678 	    }
5679 	  else
5680 	    result = store_field (to_rtx, bitsize, bitpos,
5681 				  bitregion_start, bitregion_end,
5682 				  mode1, from, get_alias_set (to),
5683 				  nontemporal, reversep);
5684 	}
5685 
5686       if (result)
5687 	preserve_temp_slots (result);
5688       pop_temp_slots ();
5689       return;
5690     }
5691 
5692   /* If the rhs is a function call and its value is not an aggregate,
5693      call the function before we start to compute the lhs.
5694      This is needed for correct code for cases such as
5695      val = setjmp (buf) on machines where reference to val
5696      requires loading up part of an address in a separate insn.
5697 
5698      Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
5699      since it might be a promoted variable where the zero- or sign- extension
5700      needs to be done.  Handling this in the normal way is safe because no
5701      computation is done before the call.  The same is true for SSA names.  */
5702   if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from)
5703       && COMPLETE_TYPE_P (TREE_TYPE (from))
5704       && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
5705       && ! (((VAR_P (to)
5706 	      || TREE_CODE (to) == PARM_DECL
5707 	      || TREE_CODE (to) == RESULT_DECL)
5708 	     && REG_P (DECL_RTL (to)))
5709 	    || TREE_CODE (to) == SSA_NAME))
5710     {
5711       rtx value;
5712 
5713       push_temp_slots ();
5714       value = expand_normal (from);
5715 
5716       if (to_rtx == 0)
5717 	to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5718 
5719       /* Handle calls that return values in multiple non-contiguous locations.
5720 	 The Irix 6 ABI has examples of this.  */
5721       if (GET_CODE (to_rtx) == PARALLEL)
5722 	{
5723 	  if (GET_CODE (value) == PARALLEL)
5724 	    emit_group_move (to_rtx, value);
5725 	  else
5726 	    emit_group_load (to_rtx, value, TREE_TYPE (from),
5727 			     int_size_in_bytes (TREE_TYPE (from)));
5728 	}
5729       else if (GET_CODE (value) == PARALLEL)
5730 	emit_group_store (to_rtx, value, TREE_TYPE (from),
5731 			  int_size_in_bytes (TREE_TYPE (from)));
5732       else if (GET_MODE (to_rtx) == BLKmode)
5733 	{
5734 	  /* Handle calls that return BLKmode values in registers.  */
5735 	  if (REG_P (value))
5736 	    copy_blkmode_from_reg (to_rtx, value, TREE_TYPE (from));
5737 	  else
5738 	    emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
5739 	}
5740       else
5741 	{
5742 	  if (POINTER_TYPE_P (TREE_TYPE (to)))
5743 	    value = convert_memory_address_addr_space
5744 	      (as_a <scalar_int_mode> (GET_MODE (to_rtx)), value,
5745 	       TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to))));
5746 
5747 	  emit_move_insn (to_rtx, value);
5748 	}
5749 
5750       preserve_temp_slots (to_rtx);
5751       pop_temp_slots ();
5752       return;
5753     }
5754 
5755   /* Ordinary treatment.  Expand TO to get a REG or MEM rtx.  */
5756   to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5757 
5758   /* Don't move directly into a return register.  */
5759   if (TREE_CODE (to) == RESULT_DECL
5760       && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL))
5761     {
5762       rtx temp;
5763 
5764       push_temp_slots ();
5765 
5766       /* If the source is itself a return value, it still is in a pseudo at
5767 	 this point so we can move it back to the return register directly.  */
5768       if (REG_P (to_rtx)
5769 	  && TYPE_MODE (TREE_TYPE (from)) == BLKmode
5770 	  && TREE_CODE (from) != CALL_EXPR)
5771 	temp = copy_blkmode_to_reg (GET_MODE (to_rtx), from);
5772       else
5773 	temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);
5774 
5775       /* Handle calls that return values in multiple non-contiguous locations.
5776 	 The Irix 6 ABI has examples of this.  */
5777       if (GET_CODE (to_rtx) == PARALLEL)
5778 	{
5779 	  if (GET_CODE (temp) == PARALLEL)
5780 	    emit_group_move (to_rtx, temp);
5781 	  else
5782 	    emit_group_load (to_rtx, temp, TREE_TYPE (from),
5783 			     int_size_in_bytes (TREE_TYPE (from)));
5784 	}
5785       else if (temp)
5786 	emit_move_insn (to_rtx, temp);
5787 
5788       preserve_temp_slots (to_rtx);
5789       pop_temp_slots ();
5790       return;
5791     }
5792 
5793   /* In case we are returning the contents of an object which overlaps
5794      the place the value is being stored, use a safe function when copying
5795      a value through a pointer into a structure value return block.  */
5796   if (TREE_CODE (to) == RESULT_DECL
5797       && TREE_CODE (from) == INDIRECT_REF
5798       && ADDR_SPACE_GENERIC_P
5799 	   (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from, 0)))))
5800       && refs_may_alias_p (to, from)
5801       && cfun->returns_struct
5802       && !cfun->returns_pcc_struct)
5803     {
5804       rtx from_rtx, size;
5805 
5806       push_temp_slots ();
5807       size = expr_size (from);
5808       from_rtx = expand_normal (from);
5809 
5810       emit_block_move_via_libcall (XEXP (to_rtx, 0), XEXP (from_rtx, 0), size);
5811 
5812       preserve_temp_slots (to_rtx);
5813       pop_temp_slots ();
5814       return;
5815     }
5816 
5817   /* Compute FROM and store the value in the rtx we got.  */
5818 
5819   push_temp_slots ();
5820   result = store_expr (from, to_rtx, 0, nontemporal, false);
5821   preserve_temp_slots (result);
5822   pop_temp_slots ();
5823   return;
5824 }
5825 
5826 /* Emits nontemporal store insn that moves FROM to TO.  Returns true if this
5827    succeeded, false otherwise.  */
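/* Illustrative fallback pattern (an assumption about the caller, not part
   of this function): a caller that wants a streaming store but must work
   on targets without one can do

     if (!emit_storent_insn (to_rtx, from_rtx))
       emit_move_insn (to_rtx, from_rtx);

   because a false return means no storent pattern was emitted and an
   ordinary move is still needed.  */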
5828 
5829 bool
5830 emit_storent_insn (rtx to, rtx from)
5831 {
5832   class expand_operand ops[2];
5833   machine_mode mode = GET_MODE (to);
5834   enum insn_code code = optab_handler (storent_optab, mode);
5835 
5836   if (code == CODE_FOR_nothing)
5837     return false;
5838 
5839   create_fixed_operand (&ops[0], to);
5840   create_input_operand (&ops[1], from, mode);
5841   return maybe_expand_insn (code, 2, ops);
5842 }
5843 
5844 /* Helper function for store_expr storing of STRING_CST.  */
5845 /* Helper function for store_expr when storing a STRING_CST.  */
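/* A small worked example (values are illustrative): for the STRING_CST
   "abcd" (TREE_STRING_LENGTH 5, including the terminating NUL) read in a
   4-byte mode, OFFSET 4 copies the single remaining byte and zero-pads the
   other three, while OFFSET 8 is past the string entirely and yields
   const0_rtx at once.  */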
5846 static rtx
5847 string_cst_read_str (void *data, void *, HOST_WIDE_INT offset,
5848 		     fixed_size_mode mode)
5849 {
5850   tree str = (tree) data;
5851 
5852   gcc_assert (offset >= 0);
5853   if (offset >= TREE_STRING_LENGTH (str))
5854     return const0_rtx;
5855 
5856   if ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode)
5857       > (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (str))
5858     {
5859       char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode));
5860       size_t l = TREE_STRING_LENGTH (str) - offset;
5861       memcpy (p, TREE_STRING_POINTER (str) + offset, l);
5862       memset (p + l, '\0', GET_MODE_SIZE (mode) - l);
5863       return c_readstr (p, as_a <scalar_int_mode> (mode), false);
5864     }
5865 
5866   /* The by-pieces infrastructure does not try to pick a vector mode
5867      for storing STRING_CST.  */
5868   return c_readstr (TREE_STRING_POINTER (str) + offset,
5869 		    as_a <scalar_int_mode> (mode), false);
5870 }
5871 
5872 /* Generate code for computing expression EXP,
5873    and storing the value into TARGET.
5874 
5875    If the mode is BLKmode then we may return TARGET itself.
5876    It turns out that in BLKmode it doesn't cause a problem,
5877    because C has no operators that could combine two different
5878    assignments into the same BLKmode object with different values
5879    with no sequence point.  Will other languages need this to
5880    be more thorough?
5881 
5882    If CALL_PARAM_P is nonzero, this is a store into a call param on the
5883    stack, and block moves may need to be treated specially.
5884 
5885    If NONTEMPORAL is true, try using a nontemporal store instruction.
5886 
5887    If REVERSE is true, the store is to be done in reverse order.  */
5888 
5889 rtx
5890 store_expr (tree exp, rtx target, int call_param_p,
5891 	    bool nontemporal, bool reverse)
5892 {
5893   rtx temp;
5894   rtx alt_rtl = NULL_RTX;
5895   location_t loc = curr_insn_location ();
5896   bool shortened_string_cst = false;
5897 
5898   if (VOID_TYPE_P (TREE_TYPE (exp)))
5899     {
5900       /* C++ can generate ?: expressions with a throw expression in one
5901 	 branch and an rvalue in the other. Here, we resolve attempts to
5902 	 store the throw expression's nonexistent result.  */
5903       gcc_assert (!call_param_p);
5904       expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
5905       return NULL_RTX;
5906     }
5907   if (TREE_CODE (exp) == COMPOUND_EXPR)
5908     {
5909       /* Perform first part of compound expression, then assign from second
5910 	 part.  */
5911       expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
5912 		   call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5913       return store_expr (TREE_OPERAND (exp, 1), target,
5914 				     call_param_p, nontemporal, reverse);
5915     }
5916   else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
5917     {
5918       /* For conditional expression, get safe form of the target.  Then
5919 	 test the condition, doing the appropriate assignment on either
5920 	 side.  This avoids the creation of unnecessary temporaries.
5921 	 For non-BLKmode, it is more efficient not to do this.  */
5922 
5923       rtx_code_label *lab1 = gen_label_rtx (), *lab2 = gen_label_rtx ();
5924 
5925       do_pending_stack_adjust ();
5926       NO_DEFER_POP;
5927       jumpifnot (TREE_OPERAND (exp, 0), lab1,
5928 		 profile_probability::uninitialized ());
5929       store_expr (TREE_OPERAND (exp, 1), target, call_param_p,
5930 		  nontemporal, reverse);
5931       emit_jump_insn (targetm.gen_jump (lab2));
5932       emit_barrier ();
5933       emit_label (lab1);
5934       store_expr (TREE_OPERAND (exp, 2), target, call_param_p,
5935 		  nontemporal, reverse);
5936       emit_label (lab2);
5937       OK_DEFER_POP;
5938 
5939       return NULL_RTX;
5940     }
5941   else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
5942     /* If this is a scalar in a register that is stored in a wider mode
5943        than the declared mode, compute the result into its declared mode
5944        and then convert to the wider mode.  Our value is the computed
5945        expression.  */
5946     {
5947       rtx inner_target = 0;
5948       scalar_int_mode outer_mode = subreg_unpromoted_mode (target);
5949       scalar_int_mode inner_mode = subreg_promoted_mode (target);
5950 
5951       /* We can do the conversion inside EXP, which will often result
5952 	 in some optimizations.  Do the conversion in two steps: first
5953 	 change the signedness, if needed, then the extend.  But don't
5954 	 do this if the type of EXP is a subtype of something else
5955 	 since then the conversion might involve more than just
5956 	 converting modes.  */
5957       if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
5958 	  && TREE_TYPE (TREE_TYPE (exp)) == 0
5959 	  && GET_MODE_PRECISION (outer_mode)
5960 	     == TYPE_PRECISION (TREE_TYPE (exp)))
5961 	{
5962 	  if (!SUBREG_CHECK_PROMOTED_SIGN (target,
5963 					  TYPE_UNSIGNED (TREE_TYPE (exp))))
5964 	    {
5965 	      /* Some types, e.g. Fortran's logical*4, won't have a signed
5966 		 version, so use the mode instead.  */
5967 	      tree ntype
5968 		= (signed_or_unsigned_type_for
5969 		   (SUBREG_PROMOTED_SIGN (target), TREE_TYPE (exp)));
5970 	      if (ntype == NULL)
5971 		ntype = lang_hooks.types.type_for_mode
5972 		  (TYPE_MODE (TREE_TYPE (exp)),
5973 		   SUBREG_PROMOTED_SIGN (target));
5974 
5975 	      exp = fold_convert_loc (loc, ntype, exp);
5976 	    }
5977 
5978 	  exp = fold_convert_loc (loc, lang_hooks.types.type_for_mode
5979 				  (inner_mode, SUBREG_PROMOTED_SIGN (target)),
5980 				  exp);
5981 
5982 	  inner_target = SUBREG_REG (target);
5983 	}
5984 
5985       temp = expand_expr (exp, inner_target, VOIDmode,
5986 			  call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5987 
5988 
5989       /* If TEMP is a VOIDmode constant, use convert_modes to make
5990 	 sure that we properly convert it.  */
5991       if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
5992 	{
5993 	  temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
5994 				temp, SUBREG_PROMOTED_SIGN (target));
5995 	  temp = convert_modes (inner_mode, outer_mode, temp,
5996 				SUBREG_PROMOTED_SIGN (target));
5997 	}
5998       else if (!SCALAR_INT_MODE_P (GET_MODE (temp)))
5999 	temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
6000 			      temp, SUBREG_PROMOTED_SIGN (target));
6001 
6002       convert_move (SUBREG_REG (target), temp,
6003 		    SUBREG_PROMOTED_SIGN (target));
6004 
6005       return NULL_RTX;
6006     }
6007   else if ((TREE_CODE (exp) == STRING_CST
6008 	    || (TREE_CODE (exp) == MEM_REF
6009 		&& TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
6010 		&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
6011 		   == STRING_CST
6012 		&& integer_zerop (TREE_OPERAND (exp, 1))))
6013 	   && !nontemporal && !call_param_p
6014 	   && MEM_P (target))
6015     {
6016       /* Optimize initialization of an array with a STRING_CST.  */
6017       HOST_WIDE_INT exp_len, str_copy_len;
6018       rtx dest_mem;
6019       tree str = TREE_CODE (exp) == STRING_CST
6020 		 ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6021 
6022       exp_len = int_expr_size (exp);
6023       if (exp_len <= 0)
6024 	goto normal_expr;
6025 
6026       if (TREE_STRING_LENGTH (str) <= 0)
6027 	goto normal_expr;
6028 
6029       if (can_store_by_pieces (exp_len, string_cst_read_str, (void *) str,
6030 			       MEM_ALIGN (target), false))
6031 	{
6032 	  store_by_pieces (target, exp_len, string_cst_read_str, (void *) str,
6033 			   MEM_ALIGN (target), false, RETURN_BEGIN);
6034 	  return NULL_RTX;
6035 	}
6036 
6037       str_copy_len = TREE_STRING_LENGTH (str);
6038       if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
6039 	{
6040 	  str_copy_len += STORE_MAX_PIECES - 1;
6041 	  str_copy_len &= ~(STORE_MAX_PIECES - 1);
6042 	}
6043       if (str_copy_len >= exp_len)
6044 	goto normal_expr;
6045 
6046       if (!can_store_by_pieces (str_copy_len, string_cst_read_str,
6047 				(void *) str, MEM_ALIGN (target), false))
6048 	goto normal_expr;
6049 
6050       dest_mem = store_by_pieces (target, str_copy_len, string_cst_read_str,
6051 				  (void *) str, MEM_ALIGN (target), false,
6052 				  RETURN_END);
6053       clear_storage (adjust_address_1 (dest_mem, BLKmode, 0, 1, 1, 0,
6054 				       exp_len - str_copy_len),
6055 		     GEN_INT (exp_len - str_copy_len), BLOCK_OP_NORMAL);
6056       return NULL_RTX;
6057     }
6058   else
6059     {
6060       rtx tmp_target;
6061 
6062   normal_expr:
6063       /* If we want to use a nontemporal or a reverse order store, force the
6064 	 value into a register first.  */
6065       tmp_target = nontemporal || reverse ? NULL_RTX : target;
6066       tree rexp = exp;
6067       if (TREE_CODE (exp) == STRING_CST
6068 	  && tmp_target == target
6069 	  && GET_MODE (target) == BLKmode
6070 	  && TYPE_MODE (TREE_TYPE (exp)) == BLKmode)
6071 	{
6072 	  rtx size = expr_size (exp);
6073 	  if (CONST_INT_P (size)
6074 	      && size != const0_rtx
6075 	      && (UINTVAL (size)
6076 		  > ((unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (exp) + 32)))
6077 	    {
6078 	      /* If the STRING_CST has a much larger array type than
6079 		 TREE_STRING_LENGTH, only emit the TREE_STRING_LENGTH part of
6080 		 it into the rodata section as the code later on will use
6081 		 memset zero for the remainder anyway.  See PR95052.  */
6082 	      tmp_target = NULL_RTX;
6083 	      rexp = copy_node (exp);
6084 	      tree index
6085 		= build_index_type (size_int (TREE_STRING_LENGTH (exp) - 1));
6086 	      TREE_TYPE (rexp) = build_array_type (TREE_TYPE (TREE_TYPE (exp)),
6087 						   index);
6088 	      shortened_string_cst = true;
6089 	    }
6090 	}
6091       temp = expand_expr_real (rexp, tmp_target, GET_MODE (target),
6092 			       (call_param_p
6093 				? EXPAND_STACK_PARM : EXPAND_NORMAL),
6094 			       &alt_rtl, false);
6095       if (shortened_string_cst)
6096 	{
6097 	  gcc_assert (MEM_P (temp));
6098 	  temp = change_address (temp, BLKmode, NULL_RTX);
6099 	}
6100     }
6101 
6102   /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
6103      the same as that of TARGET, adjust the constant.  This is needed, for
6104      example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
6105      only a word-sized value.  */
6106   if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
6107       && TREE_CODE (exp) != ERROR_MARK
6108       && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
6109     {
6110       gcc_assert (!shortened_string_cst);
6111       if (GET_MODE_CLASS (GET_MODE (target))
6112 	  != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
6113 	  && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
6114 		       GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
6115 	{
6116 	  rtx t = simplify_gen_subreg (GET_MODE (target), temp,
6117 				       TYPE_MODE (TREE_TYPE (exp)), 0);
6118 	  if (t)
6119 	    temp = t;
6120 	}
6121       if (GET_MODE (temp) == VOIDmode)
6122 	temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
6123 			      temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
6124     }
6125 
6126   /* If value was not generated in the target, store it there.
6127      Convert the value to TARGET's type first if necessary and emit the
6128      pending incrementations that have been queued when expanding EXP.
6129      Note that we cannot emit the whole queue blindly because this will
6130      effectively disable the POST_INC optimization later.
6131 
6132      If TEMP and TARGET compare equal according to rtx_equal_p, but
6133      one or both of them are volatile memory refs, we have to distinguish
6134      two cases:
6135      - expand_expr has used TARGET.  In this case, we must not generate
6136        another copy.  This can be detected by TARGET being equal according
6137        to == .
6138      - expand_expr has not used TARGET - that means that the source just
6139        happens to have the same RTX form.  Since temp will have been created
6140        by expand_expr, it will compare unequal according to == .
6141        We must generate a copy in this case, to reach the correct number
6142        of volatile memory references.  */
6143 
6144   if ((! rtx_equal_p (temp, target)
6145        || (temp != target && (side_effects_p (temp)
6146 			      || side_effects_p (target))))
6147       && TREE_CODE (exp) != ERROR_MARK
6148       /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
6149 	 but TARGET is not valid memory reference, TEMP will differ
6150 	 from TARGET although it is really the same location.  */
6151       && !(alt_rtl
6152 	   && rtx_equal_p (alt_rtl, target)
6153 	   && !side_effects_p (alt_rtl)
6154 	   && !side_effects_p (target))
6155       /* If there's nothing to copy, don't bother.  Don't call
6156 	 expr_size unless necessary, because the expr_size hook of some
6157 	 front ends (C++) must not be given objects that are not
6158 	 supposed to be bit-copied or bit-initialized.  */
6159       && expr_size (exp) != const0_rtx)
6160     {
6161       if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode)
6162 	{
6163 	  gcc_assert (!shortened_string_cst);
6164 	  if (GET_MODE (target) == BLKmode)
6165 	    {
6166 	      /* Handle calls that return BLKmode values in registers.  */
6167 	      if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
6168 		copy_blkmode_from_reg (target, temp, TREE_TYPE (exp));
6169 	      else
6170 		store_bit_field (target,
6171 				 rtx_to_poly_int64 (expr_size (exp))
6172 				 * BITS_PER_UNIT,
6173 				 0, 0, 0, GET_MODE (temp), temp, reverse);
6174 	    }
6175 	  else
6176 	    convert_move (target, temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
6177 	}
6178 
6179       else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
6180 	{
6181 	  /* Handle copying a string constant into an array.  The string
6182 	     constant may be shorter than the array.  So copy just the string's
6183 	     actual length, and clear the rest.  First get the size of the data
6184 	     type of the string, which is actually the size of the target.  */
6185 	  rtx size = expr_size (exp);
6186 
6187 	  if (CONST_INT_P (size)
6188 	      && INTVAL (size) < TREE_STRING_LENGTH (exp))
6189 	    emit_block_move (target, temp, size,
6190 			     (call_param_p
6191 			      ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
6192 	  else
6193 	    {
6194 	      machine_mode pointer_mode
6195 		= targetm.addr_space.pointer_mode (MEM_ADDR_SPACE (target));
6196 	      machine_mode address_mode = get_address_mode (target);
6197 
6198 	      /* Compute the size of the data to copy from the string.  */
6199 	      tree copy_size
6200 		= size_binop_loc (loc, MIN_EXPR,
6201 				  make_tree (sizetype, size),
6202 				  size_int (TREE_STRING_LENGTH (exp)));
6203 	      rtx copy_size_rtx
6204 		= expand_expr (copy_size, NULL_RTX, VOIDmode,
6205 			       (call_param_p
6206 				? EXPAND_STACK_PARM : EXPAND_NORMAL));
6207 	      rtx_code_label *label = 0;
6208 
6209 	      /* Copy that much.  */
6210 	      copy_size_rtx = convert_to_mode (pointer_mode, copy_size_rtx,
6211 					       TYPE_UNSIGNED (sizetype));
6212 	      emit_block_move (target, temp, copy_size_rtx,
6213 			       (call_param_p
6214 				? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
6215 
6216 	      /* Figure out how much is left in TARGET that we have to clear.
6217 		 Do all calculations in pointer_mode.  */
6218 	      poly_int64 const_copy_size;
6219 	      if (poly_int_rtx_p (copy_size_rtx, &const_copy_size))
6220 		{
6221 		  size = plus_constant (address_mode, size, -const_copy_size);
6222 		  target = adjust_address (target, BLKmode, const_copy_size);
6223 		}
6224 	      else
6225 		{
6226 		  size = expand_binop (TYPE_MODE (sizetype), sub_optab, size,
6227 				       copy_size_rtx, NULL_RTX, 0,
6228 				       OPTAB_LIB_WIDEN);
6229 
6230 		  if (GET_MODE (copy_size_rtx) != address_mode)
6231 		    copy_size_rtx = convert_to_mode (address_mode,
6232 						     copy_size_rtx,
6233 						     TYPE_UNSIGNED (sizetype));
6234 
6235 		  target = offset_address (target, copy_size_rtx,
6236 					   highest_pow2_factor (copy_size));
6237 		  label = gen_label_rtx ();
6238 		  emit_cmp_and_jump_insns (size, const0_rtx, LT, NULL_RTX,
6239 					   GET_MODE (size), 0, label);
6240 		}
6241 
6242 	      if (size != const0_rtx)
6243 		clear_storage (target, size, BLOCK_OP_NORMAL);
6244 
6245 	      if (label)
6246 		emit_label (label);
6247 	    }
6248 	}
6249       else if (shortened_string_cst)
6250 	gcc_unreachable ();
6251       /* Handle calls that return values in multiple non-contiguous locations.
6252 	 The Irix 6 ABI has examples of this.  */
6253       else if (GET_CODE (target) == PARALLEL)
6254 	{
6255 	  if (GET_CODE (temp) == PARALLEL)
6256 	    emit_group_move (target, temp);
6257 	  else
6258 	    emit_group_load (target, temp, TREE_TYPE (exp),
6259 			     int_size_in_bytes (TREE_TYPE (exp)));
6260 	}
6261       else if (GET_CODE (temp) == PARALLEL)
6262 	emit_group_store (target, temp, TREE_TYPE (exp),
6263 			  int_size_in_bytes (TREE_TYPE (exp)));
6264       else if (GET_MODE (temp) == BLKmode)
6265 	emit_block_move (target, temp, expr_size (exp),
6266 			 (call_param_p
6267 			  ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
6268       /* If we emit a nontemporal store, there is nothing else to do.  */
6269       else if (nontemporal && emit_storent_insn (target, temp))
6270 	;
6271       else
6272 	{
6273 	  if (reverse)
6274 	    temp = flip_storage_order (GET_MODE (target), temp);
6275 	  temp = force_operand (temp, target);
6276 	  if (temp != target)
6277 	    emit_move_insn (target, temp);
6278 	}
6279     }
6280   else
6281     gcc_assert (!shortened_string_cst);
6282 
6283   return NULL_RTX;
6284 }
6285 
6286 /* Return true if field F of structure TYPE is a flexible array.  */
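/* For example, in
     struct msg { int len; char data[]; };
   DATA is a flexible array member: it is the last field, its array domain
   has a zero lower bound and no upper bound, and the enclosing struct still
   has a known constant size.  */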
6287 
6288 static bool
6289 flexible_array_member_p (const_tree f, const_tree type)
6290 {
6291   const_tree tf;
6292 
6293   tf = TREE_TYPE (f);
6294   return (DECL_CHAIN (f) == NULL
6295 	  && TREE_CODE (tf) == ARRAY_TYPE
6296 	  && TYPE_DOMAIN (tf)
6297 	  && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
6298 	  && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
6299 	  && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
6300 	  && int_size_in_bytes (type) >= 0);
6301 }
6302 
6303 /* If FOR_CTOR_P, return the number of top-level elements that a constructor
6304    must have in order for it to completely initialize a value of type TYPE.
6305    Return -1 if the number isn't known.
6306 
6307    If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE.  */
6308 
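/* For instance, given
     struct s { int a; int b[3]; };
   a complete constructor needs 2 top-level elements (FOR_CTOR_P), while the
   scalar estimate (!FOR_CTOR_P) is 4: one for A plus three for B.  */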
6309 static HOST_WIDE_INT
6310 count_type_elements (const_tree type, bool for_ctor_p)
6311 {
6312   switch (TREE_CODE (type))
6313     {
6314     case ARRAY_TYPE:
6315       {
6316 	tree nelts;
6317 
6318 	nelts = array_type_nelts (type);
6319 	if (nelts && tree_fits_uhwi_p (nelts))
6320 	  {
6321 	    unsigned HOST_WIDE_INT n;
6322 
6323 	    n = tree_to_uhwi (nelts) + 1;
6324 	    if (n == 0 || for_ctor_p)
6325 	      return n;
6326 	    else
6327 	      return n * count_type_elements (TREE_TYPE (type), false);
6328 	  }
6329 	return for_ctor_p ? -1 : 1;
6330       }
6331 
6332     case RECORD_TYPE:
6333       {
6334 	unsigned HOST_WIDE_INT n;
6335 	tree f;
6336 
6337 	n = 0;
6338 	for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
6339 	  if (TREE_CODE (f) == FIELD_DECL)
6340 	    {
6341 	      if (!for_ctor_p)
6342 		n += count_type_elements (TREE_TYPE (f), false);
6343 	      else if (!flexible_array_member_p (f, type))
6344 		/* Don't count flexible arrays, which are not supposed
6345 		   to be initialized.  */
6346 		n += 1;
6347 	    }
6348 
6349 	return n;
6350       }
6351 
6352     case UNION_TYPE:
6353     case QUAL_UNION_TYPE:
6354       {
6355 	tree f;
6356 	HOST_WIDE_INT n, m;
6357 
6358 	gcc_assert (!for_ctor_p);
6359 	/* Estimate the number of scalars in each field and pick the
6360 	   maximum.  Other estimates would do instead; the idea is simply
6361 	   to make sure that the estimate is not sensitive to the ordering
6362 	   of the fields.  */
6363 	n = 1;
6364 	for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
6365 	  if (TREE_CODE (f) == FIELD_DECL)
6366 	    {
6367 	      m = count_type_elements (TREE_TYPE (f), false);
6368 	      /* If the field doesn't span the whole union, add an extra
6369 		 scalar for the rest.  */
6370 	      if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f)),
6371 				    TYPE_SIZE (type)) != 1)
6372 		m++;
6373 	      if (n < m)
6374 		n = m;
6375 	    }
6376 	return n;
6377       }
6378 
6379     case COMPLEX_TYPE:
6380       return 2;
6381 
6382     case VECTOR_TYPE:
6383       {
6384 	unsigned HOST_WIDE_INT nelts;
6385 	if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
6386 	  return nelts;
6387 	else
6388 	  return -1;
6389       }
6390 
6391     case INTEGER_TYPE:
6392     case REAL_TYPE:
6393     case FIXED_POINT_TYPE:
6394     case ENUMERAL_TYPE:
6395     case BOOLEAN_TYPE:
6396     case POINTER_TYPE:
6397     case OFFSET_TYPE:
6398     case REFERENCE_TYPE:
6399     case NULLPTR_TYPE:
6400     case OPAQUE_TYPE:
6401       return 1;
6402 
6403     case ERROR_MARK:
6404       return 0;
6405 
6406     case VOID_TYPE:
6407     case METHOD_TYPE:
6408     case FUNCTION_TYPE:
6409     case LANG_TYPE:
6410     default:
6411       gcc_unreachable ();
6412     }
6413 }
6414 
6415 /* Helper for categorize_ctor_elements.  Identical interface.  */
6416 
6417 static bool
6418 categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6419 			    HOST_WIDE_INT *p_unique_nz_elts,
6420 			    HOST_WIDE_INT *p_init_elts, bool *p_complete)
6421 {
6422   unsigned HOST_WIDE_INT idx;
6423   HOST_WIDE_INT nz_elts, unique_nz_elts, init_elts, num_fields;
6424   tree value, purpose, elt_type;
6425 
6426   /* Whether CTOR is a valid constant initializer, in accordance with what
6427      initializer_constant_valid_p does.  If inferred from the constructor
6428      elements, true until proven otherwise.  */
6429   bool const_from_elts_p = constructor_static_from_elts_p (ctor);
6430   bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
6431 
6432   nz_elts = 0;
6433   unique_nz_elts = 0;
6434   init_elts = 0;
6435   num_fields = 0;
6436   elt_type = NULL_TREE;
6437 
6438   FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
6439     {
6440       HOST_WIDE_INT mult = 1;
6441 
6442       if (purpose && TREE_CODE (purpose) == RANGE_EXPR)
6443 	{
6444 	  tree lo_index = TREE_OPERAND (purpose, 0);
6445 	  tree hi_index = TREE_OPERAND (purpose, 1);
6446 
6447 	  if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index))
6448 	    mult = (tree_to_uhwi (hi_index)
6449 		    - tree_to_uhwi (lo_index) + 1);
6450 	}
6451       num_fields += mult;
6452       elt_type = TREE_TYPE (value);
6453 
6454       switch (TREE_CODE (value))
6455 	{
6456 	case CONSTRUCTOR:
6457 	  {
6458 	    HOST_WIDE_INT nz = 0, unz = 0, ic = 0;
6459 
6460 	    bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &unz,
6461 							   &ic, p_complete);
6462 
6463 	    nz_elts += mult * nz;
6464 	    unique_nz_elts += unz;
6465  	    init_elts += mult * ic;
6466 
6467 	    if (const_from_elts_p && const_p)
6468 	      const_p = const_elt_p;
6469 	  }
6470 	  break;
6471 
6472 	case INTEGER_CST:
6473 	case REAL_CST:
6474 	case FIXED_CST:
6475 	  if (!initializer_zerop (value))
6476 	    {
6477 	      nz_elts += mult;
6478 	      unique_nz_elts++;
6479 	    }
6480 	  init_elts += mult;
6481 	  break;
6482 
6483 	case STRING_CST:
6484 	  nz_elts += mult * TREE_STRING_LENGTH (value);
6485 	  unique_nz_elts += TREE_STRING_LENGTH (value);
6486 	  init_elts += mult * TREE_STRING_LENGTH (value);
6487 	  break;
6488 
6489 	case COMPLEX_CST:
6490 	  if (!initializer_zerop (TREE_REALPART (value)))
6491 	    {
6492 	      nz_elts += mult;
6493 	      unique_nz_elts++;
6494 	    }
6495 	  if (!initializer_zerop (TREE_IMAGPART (value)))
6496 	    {
6497 	      nz_elts += mult;
6498 	      unique_nz_elts++;
6499 	    }
6500 	  init_elts += 2 * mult;
6501 	  break;
6502 
6503 	case VECTOR_CST:
6504 	  {
6505 	    /* We can only construct constant-length vectors using
6506 	       CONSTRUCTOR.  */
6507 	    unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
6508 	    for (unsigned int i = 0; i < nunits; ++i)
6509 	      {
6510 		tree v = VECTOR_CST_ELT (value, i);
6511 		if (!initializer_zerop (v))
6512 		  {
6513 		    nz_elts += mult;
6514 		    unique_nz_elts++;
6515 		  }
6516 		init_elts += mult;
6517 	      }
6518 	  }
6519 	  break;
6520 
6521 	default:
6522 	  {
6523 	    HOST_WIDE_INT tc = count_type_elements (elt_type, false);
6524 	    nz_elts += mult * tc;
6525 	    unique_nz_elts += tc;
6526 	    init_elts += mult * tc;
6527 
6528 	    if (const_from_elts_p && const_p)
6529 	      const_p
6530 		= initializer_constant_valid_p (value,
6531 						elt_type,
6532 						TYPE_REVERSE_STORAGE_ORDER
6533 						(TREE_TYPE (ctor)))
6534 		  != NULL_TREE;
6535 	  }
6536 	  break;
6537 	}
6538     }
6539 
6540   if (*p_complete && !complete_ctor_at_level_p (TREE_TYPE (ctor),
6541 						num_fields, elt_type))
6542     *p_complete = false;
6543 
6544   *p_nz_elts += nz_elts;
6545   *p_unique_nz_elts += unique_nz_elts;
6546   *p_init_elts += init_elts;
6547 
6548   return const_p;
6549 }
6550 
6551 /* Examine CTOR to discover:
6552    * how many scalar fields are set to nonzero values,
6553      and place it in *P_NZ_ELTS;
6554    * the same, but counting each RANGE_EXPR with a multiplier of 1 instead
6555      of high - low + 1 (this can be useful for callers to identify ctors
6556      that could be cheaply initialized with - perhaps nested - loops
6557      rather than copied from huge read-only data),
6558      and place it in *P_UNIQUE_NZ_ELTS;
6559    * how many scalar fields in total are in CTOR,
6560      and place it in *P_INIT_ELTS.
6561    * whether the constructor is complete -- in the sense that every
6562      meaningful byte is explicitly given a value --
6563      and place it in *P_COMPLETE.
6564 
6565    Return whether or not CTOR is a valid static constant initializer, the same
6566    as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0".  */
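/* As a rough illustration, for
     int a[10] = { [0 ... 9] = 5 };
   the counts would be *P_NZ_ELTS == 10 and *P_INIT_ELTS == 10, but
   *P_UNIQUE_NZ_ELTS == 1, since the RANGE_EXPR repeats a single value.  */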
6567 
6568 bool
6569 categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6570 			  HOST_WIDE_INT *p_unique_nz_elts,
6571 			  HOST_WIDE_INT *p_init_elts, bool *p_complete)
6572 {
6573   *p_nz_elts = 0;
6574   *p_unique_nz_elts = 0;
6575   *p_init_elts = 0;
6576   *p_complete = true;
6577 
6578   return categorize_ctor_elements_1 (ctor, p_nz_elts, p_unique_nz_elts,
6579 				     p_init_elts, p_complete);
6580 }
6581 
6582 /* TYPE is initialized by a constructor with NUM_ELTS elements, the last
6583    of which had type LAST_TYPE.  Each element was itself a complete
6584    initializer, in the sense that every meaningful byte was explicitly
6585    given a value.  Return true if the same is true for the constructor
6586    as a whole.  */
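/* E.g. for a union, an initializer of a single member is only treated as
   covering the whole object when that member's size equals the size of the
   union itself; initializing a smaller member leaves bytes that are not
   explicitly given a value.  */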
6587 
6588 bool
6589 complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
6590 			  const_tree last_type)
6591 {
6592   if (TREE_CODE (type) == UNION_TYPE
6593       || TREE_CODE (type) == QUAL_UNION_TYPE)
6594     {
6595       if (num_elts == 0)
6596 	return false;
6597 
6598       gcc_assert (num_elts == 1 && last_type);
6599 
6600       /* ??? We could look at each element of the union, and find the
6601 	 largest element.  Which would avoid comparing the size of the
6602 	 initialized element against any tail padding in the union.
6603 	 Doesn't seem worth the effort...  */
6604       return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
6605     }
6606 
6607   return count_type_elements (type, true) == num_elts;
6608 }
6609 
6610 /* Return 1 if EXP contains mostly (3/4) zeros.  */
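/* Roughly: a fully-specified 8-element constructor with a single nonzero
   entry qualifies (1 < 8 / 4), while one with 4 elements and one nonzero
   entry does not (1 < 4 / 4 is false); an incomplete constructor always
   counts as mostly zeros, since the missing elements are zero-initialized.  */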
6611 
6612 static int
6613 mostly_zeros_p (const_tree exp)
6614 {
6615   if (TREE_CODE (exp) == CONSTRUCTOR)
6616     {
6617       HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6618       bool complete_p;
6619 
6620       categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6621 				&complete_p);
6622       return !complete_p || nz_elts < init_elts / 4;
6623     }
6624 
6625   return initializer_zerop (exp);
6626 }
6627 
6628 /* Return 1 if EXP contains all zeros.  */
6629 
6630 static int
6631 all_zeros_p (const_tree exp)
6632 {
6633   if (TREE_CODE (exp) == CONSTRUCTOR)
6634     {
6635       HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6636       bool complete_p;
6637 
6638       categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6639 				&complete_p);
6640       return nz_elts == 0;
6641     }
6642 
6643   return initializer_zerop (exp);
6644 }
6645 
6646 /* Helper function for store_constructor.
6647    TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
6648    CLEARED is as for store_constructor.
6649    ALIAS_SET is the alias set to use for any stores.
6650    If REVERSE is true, the store is to be done in reverse order.
6651 
6652    This provides a recursive shortcut back to store_constructor when it isn't
6653    necessary to go through store_field.  This is so that we can pass through
6654    the cleared field to let store_constructor know that we may not have to
6655    clear a substructure if the outer structure has already been cleared.  */
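/* For example, a nested struct at a byte-aligned position (BITPOS and
   BITSIZE both multiples of BITS_PER_UNIT) can take the recursive
   store_constructor path, while a nested CONSTRUCTOR targeting a bit-field
   at, say, bit offset 3 still goes through store_field.  */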
6656 
6657 static void
6658 store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
6659 			 poly_uint64 bitregion_start,
6660 			 poly_uint64 bitregion_end,
6661 			 machine_mode mode,
6662 			 tree exp, int cleared,
6663 			 alias_set_type alias_set, bool reverse)
6664 {
6665   poly_int64 bytepos;
6666   poly_uint64 bytesize;
6667   if (TREE_CODE (exp) == CONSTRUCTOR
6668       /* We can only call store_constructor recursively if the size and
6669 	 bit position are on a byte boundary.  */
6670       && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
6671       && maybe_ne (bitsize, 0U)
6672       && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
6673       /* If we have a nonzero bitpos for a register target, then we just
6674 	 let store_field do the bitfield handling.  This is unlikely to
6675 	 generate unnecessary clear instructions anyway.  */
6676       && (known_eq (bitpos, 0) || MEM_P (target)))
6677     {
6678       if (MEM_P (target))
6679 	{
6680 	  machine_mode target_mode = GET_MODE (target);
6681 	  if (target_mode != BLKmode
6682 	      && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
6683 	    target_mode = BLKmode;
6684 	  target = adjust_address (target, target_mode, bytepos);
6685 	}
6686 
6687 
6688       /* Update the alias set, if required.  */
6689       if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
6690 	  && MEM_ALIAS_SET (target) != 0)
6691 	{
6692 	  target = copy_rtx (target);
6693 	  set_mem_alias_set (target, alias_set);
6694 	}
6695 
6696       store_constructor (exp, target, cleared, bytesize, reverse);
6697     }
6698   else
6699     store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
6700 		 exp, alias_set, false, reverse);
6701 }
6702 
6703 
6704 /* Returns the number of FIELD_DECLs in TYPE.  */
6705 
6706 static int
6707 fields_length (const_tree type)
6708 {
6709   tree t = TYPE_FIELDS (type);
6710   int count = 0;
6711 
6712   for (; t; t = DECL_CHAIN (t))
6713     if (TREE_CODE (t) == FIELD_DECL)
6714       ++count;
6715 
6716   return count;
6717 }
6718 
6719 
6720 /* Store the value of constructor EXP into the rtx TARGET.
6721    TARGET is either a REG or a MEM; we know it cannot conflict, since
6722    safe_from_p has been called.
6723    CLEARED is true if TARGET is known to have been zero'd.
6724    SIZE is the number of bytes of TARGET we are allowed to modify: this
6725    may not be the same as the size of EXP if we are assigning to a field
6726    which has been packed to exclude padding bits.
6727    If REVERSE is true, the store is to be done in reverse order.  */
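/* E.g. an aggregate initializer that mentions only a few of many fields,
   such as
     struct s x = { .b = 1 };
   is typically expanded by clearing the whole object once and then storing
   just the nonzero fields, instead of emitting one store per field.  */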
6728 
6729 static void
6730 store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
6731 		   bool reverse)
6732 {
6733   tree type = TREE_TYPE (exp);
6734   HOST_WIDE_INT exp_size = int_size_in_bytes (type);
6735   poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;
6736 
6737   switch (TREE_CODE (type))
6738     {
6739     case RECORD_TYPE:
6740     case UNION_TYPE:
6741     case QUAL_UNION_TYPE:
6742       {
6743 	unsigned HOST_WIDE_INT idx;
6744 	tree field, value;
6745 
6746 	/* The storage order is specified for every aggregate type.  */
6747 	reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6748 
6749 	/* If size is zero or the target is already cleared, do nothing.  */
6750 	if (known_eq (size, 0) || cleared)
6751 	  cleared = 1;
6752 	/* We either clear the aggregate or indicate the value is dead.  */
6753 	else if ((TREE_CODE (type) == UNION_TYPE
6754 		  || TREE_CODE (type) == QUAL_UNION_TYPE)
6755 		 && ! CONSTRUCTOR_ELTS (exp))
6756 	  /* If the constructor is empty, clear the union.  */
6757 	  {
6758 	    clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
6759 	    cleared = 1;
6760 	  }
6761 
6762 	/* If we are building a static constructor into a register,
6763 	   set the initial value as zero so we can fold the value into
6764 	   a constant.  But if more than one register is involved,
6765 	   this probably loses.  */
6766 	else if (REG_P (target) && TREE_STATIC (exp)
6767 		 && known_le (GET_MODE_SIZE (GET_MODE (target)),
6768 			      REGMODE_NATURAL_SIZE (GET_MODE (target))))
6769 	  {
6770 	    emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6771 	    cleared = 1;
6772 	  }
6773 
6774         /* If the constructor has fewer fields than the structure or
6775 	   if we are initializing the structure to mostly zeros, clear
6776 	   the whole structure first.  Don't do this if TARGET is a
6777 	   register whose mode size isn't equal to SIZE since
6778 	   clear_storage can't handle this case.  */
6779 	else if (known_size_p (size)
6780 		 && (((int) CONSTRUCTOR_NELTS (exp) != fields_length (type))
6781 		     || mostly_zeros_p (exp))
6782 		 && (!REG_P (target)
6783 		     || known_eq (GET_MODE_SIZE (GET_MODE (target)), size)))
6784 	  {
6785 	    clear_storage (target, gen_int_mode (size, Pmode),
6786 			   BLOCK_OP_NORMAL);
6787 	    cleared = 1;
6788 	  }
6789 
6790 	if (REG_P (target) && !cleared)
6791 	  emit_clobber (target);
6792 
6793 	/* Store each element of the constructor into the
6794 	   corresponding field of TARGET.  */
6795 	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, field, value)
6796 	  {
6797 	    machine_mode mode;
6798 	    HOST_WIDE_INT bitsize;
6799 	    HOST_WIDE_INT bitpos = 0;
6800 	    tree offset;
6801 	    rtx to_rtx = target;
6802 
6803 	    /* Just ignore missing fields.  We cleared the whole
6804 	       structure, above, if any fields are missing.  */
6805 	    if (field == 0)
6806 	      continue;
6807 
6808 	    if (cleared && initializer_zerop (value))
6809 	      continue;
6810 
6811 	    if (tree_fits_uhwi_p (DECL_SIZE (field)))
6812 	      bitsize = tree_to_uhwi (DECL_SIZE (field));
6813 	    else
6814 	      gcc_unreachable ();
6815 
6816 	    mode = DECL_MODE (field);
6817 	    if (DECL_BIT_FIELD (field))
6818 	      mode = VOIDmode;
6819 
6820 	    offset = DECL_FIELD_OFFSET (field);
6821 	    if (tree_fits_shwi_p (offset)
6822 		&& tree_fits_shwi_p (bit_position (field)))
6823 	      {
6824 		bitpos = int_bit_position (field);
6825 		offset = NULL_TREE;
6826 	      }
6827 	    else
6828 	      gcc_unreachable ();
6829 
6830 	    /* If this initializes a field that is smaller than a
6831 	       word, at the start of a word, try to widen it to a full
6832 	       word.  This special case allows us to output C++ member
6833 	       function initializations in a form that the optimizers
6834 	       can understand.  */
6835 	    if (WORD_REGISTER_OPERATIONS
6836 		&& REG_P (target)
6837 		&& bitsize < BITS_PER_WORD
6838 		&& bitpos % BITS_PER_WORD == 0
6839 		&& GET_MODE_CLASS (mode) == MODE_INT
6840 		&& TREE_CODE (value) == INTEGER_CST
6841 		&& exp_size >= 0
6842 		&& bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
6843 	      {
6844 		type = TREE_TYPE (value);
6845 
6846 		if (TYPE_PRECISION (type) < BITS_PER_WORD)
6847 		  {
6848 		    type = lang_hooks.types.type_for_mode
6849 		      (word_mode, TYPE_UNSIGNED (type));
6850 		    value = fold_convert (type, value);
6851 		    /* Make sure the bits beyond the original bitsize are zero
6852 		       so that we can correctly avoid extra zeroing stores in
6853 		       later constructor elements.  */
6854 		    tree bitsize_mask
6855 		      = wide_int_to_tree (type, wi::mask (bitsize, false,
6856 							   BITS_PER_WORD));
6857 		    value = fold_build2 (BIT_AND_EXPR, type, value, bitsize_mask);
6858 		  }
6859 
6860 		if (BYTES_BIG_ENDIAN)
6861 		  value
6862 		   = fold_build2 (LSHIFT_EXPR, type, value,
6863 				   build_int_cst (type,
6864 						  BITS_PER_WORD - bitsize));
6865 		bitsize = BITS_PER_WORD;
6866 		mode = word_mode;
6867 	      }
6868 
6869 	    if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
6870 		&& DECL_NONADDRESSABLE_P (field))
6871 	      {
6872 		to_rtx = copy_rtx (to_rtx);
6873 		MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
6874 	      }
6875 
6876 	    store_constructor_field (to_rtx, bitsize, bitpos,
6877 				     0, bitregion_end, mode,
6878 				     value, cleared,
6879 				     get_alias_set (TREE_TYPE (field)),
6880 				     reverse);
6881 	  }
6882 	break;
6883       }
6884     case ARRAY_TYPE:
6885       {
6886 	tree value, index;
6887 	unsigned HOST_WIDE_INT i;
6888 	int need_to_clear;
6889 	tree domain;
6890 	tree elttype = TREE_TYPE (type);
6891 	int const_bounds_p;
6892 	HOST_WIDE_INT minelt = 0;
6893 	HOST_WIDE_INT maxelt = 0;
6894 
6895 	/* The storage order is specified for every aggregate type.  */
6896 	reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6897 
6898 	domain = TYPE_DOMAIN (type);
6899 	const_bounds_p = (TYPE_MIN_VALUE (domain)
6900 			  && TYPE_MAX_VALUE (domain)
6901 			  && tree_fits_shwi_p (TYPE_MIN_VALUE (domain))
6902 			  && tree_fits_shwi_p (TYPE_MAX_VALUE (domain)));
6903 
6904 	/* If we have constant bounds for the range of the type, get them.  */
6905 	if (const_bounds_p)
6906 	  {
6907 	    minelt = tree_to_shwi (TYPE_MIN_VALUE (domain));
6908 	    maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain));
6909 	  }
6910 
6911 	/* If the constructor has fewer elements than the array, clear
6912            the whole array first.  Similarly if this is a static
6913            constructor of a non-BLKmode object.  */
6914 	if (cleared)
6915 	  need_to_clear = 0;
6916 	else if (REG_P (target) && TREE_STATIC (exp))
6917 	  need_to_clear = 1;
6918 	else
6919 	  {
6920 	    unsigned HOST_WIDE_INT idx;
6921 	    HOST_WIDE_INT count = 0, zero_count = 0;
6922 	    need_to_clear = ! const_bounds_p;
6923 
6924 	    /* This loop is a more accurate version of the loop in
6925 	       mostly_zeros_p (it handles RANGE_EXPR in an index).  It
6926 	       is also needed to check for missing elements.  */
6927 	    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, index, value)
6928 	      {
6929 		HOST_WIDE_INT this_node_count;
6930 
6931 		if (need_to_clear)
6932 		  break;
6933 
6934 		if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6935 		  {
6936 		    tree lo_index = TREE_OPERAND (index, 0);
6937 		    tree hi_index = TREE_OPERAND (index, 1);
6938 
6939 		    if (! tree_fits_uhwi_p (lo_index)
6940 			|| ! tree_fits_uhwi_p (hi_index))
6941 		      {
6942 			need_to_clear = 1;
6943 			break;
6944 		      }
6945 
6946 		    this_node_count = (tree_to_uhwi (hi_index)
6947 				       - tree_to_uhwi (lo_index) + 1);
6948 		  }
6949 		else
6950 		  this_node_count = 1;
6951 
6952 		count += this_node_count;
6953 		if (mostly_zeros_p (value))
6954 		  zero_count += this_node_count;
6955 	      }
6956 
6957 	    /* Clear the entire array first if there are any missing
6958 	       elements, or if the incidence of zero elements is >=
6959 	       75%.  */
6960 	    if (! need_to_clear
6961 		&& (count < maxelt - minelt + 1
6962 		    || 4 * zero_count >= 3 * count))
6963 	      need_to_clear = 1;
6964 	  }
6965 
6966 	if (need_to_clear && maybe_gt (size, 0))
6967 	  {
6968 	    if (REG_P (target))
6969 	      emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6970 	    else
6971 	      clear_storage (target, gen_int_mode (size, Pmode),
6972 			     BLOCK_OP_NORMAL);
6973 	    cleared = 1;
6974 	  }
6975 
6976 	if (!cleared && REG_P (target))
6977 	  /* Inform later passes that the old value is dead.  */
6978 	  emit_clobber (target);
6979 
6980 	/* Store each element of the constructor into the
6981 	   corresponding element of TARGET, determined by counting the
6982 	   elements.  */
6983 	FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
6984 	  {
6985 	    machine_mode mode;
6986 	    poly_int64 bitsize;
6987 	    HOST_WIDE_INT bitpos;
6988 	    rtx xtarget = target;
6989 
6990 	    if (cleared && initializer_zerop (value))
6991 	      continue;
6992 
6993 	    mode = TYPE_MODE (elttype);
6994 	    if (mode != BLKmode)
6995 	      bitsize = GET_MODE_BITSIZE (mode);
6996 	    else if (!poly_int_tree_p (TYPE_SIZE (elttype), &bitsize))
6997 	      bitsize = -1;
6998 
6999 	    if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
7000 	      {
7001 		tree lo_index = TREE_OPERAND (index, 0);
7002 		tree hi_index = TREE_OPERAND (index, 1);
7003 		rtx index_r, pos_rtx;
7004 		HOST_WIDE_INT lo, hi, count;
7005 		tree position;
7006 
7007 		/* If the range is constant and "small", unroll the loop.  */
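		/* "Small" here roughly means at most two iterations, or no
		   more than about 40 bytes of element data in total; e.g. a
		   range such as [2 ... 5] over an int array (4 * 4 bytes)
		   is unrolled, while larger ranges for memory targets use
		   the runtime loop below.  */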
7008 		if (const_bounds_p
7009 		    && tree_fits_shwi_p (lo_index)
7010 		    && tree_fits_shwi_p (hi_index)
7011 		    && (lo = tree_to_shwi (lo_index),
7012 			hi = tree_to_shwi (hi_index),
7013 			count = hi - lo + 1,
7014 			(!MEM_P (target)
7015 			 || count <= 2
7016 			 || (tree_fits_uhwi_p (TYPE_SIZE (elttype))
7017 			     && (tree_to_uhwi (TYPE_SIZE (elttype)) * count
7018 				 <= 40 * 8)))))
7019 		  {
7020 		    lo -= minelt;  hi -= minelt;
7021 		    for (; lo <= hi; lo++)
7022 		      {
7023 			bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype));
7024 
7025 			if (MEM_P (target)
7026 			    && !MEM_KEEP_ALIAS_SET_P (target)
7027 			    && TREE_CODE (type) == ARRAY_TYPE
7028 			    && TYPE_NONALIASED_COMPONENT (type))
7029 			  {
7030 			    target = copy_rtx (target);
7031 			    MEM_KEEP_ALIAS_SET_P (target) = 1;
7032 			  }
7033 
7034 			store_constructor_field
7035 			  (target, bitsize, bitpos, 0, bitregion_end,
7036 			   mode, value, cleared,
7037 			   get_alias_set (elttype), reverse);
7038 		      }
7039 		  }
7040 		else
7041 		  {
7042 		    rtx_code_label *loop_start = gen_label_rtx ();
7043 		    rtx_code_label *loop_end = gen_label_rtx ();
7044 		    tree exit_cond;
7045 
7046 		    expand_normal (hi_index);
7047 
7048 		    index = build_decl (EXPR_LOCATION (exp),
7049 					VAR_DECL, NULL_TREE, domain);
7050 		    index_r = gen_reg_rtx (promote_decl_mode (index, NULL));
7051 		    SET_DECL_RTL (index, index_r);
7052 		    store_expr (lo_index, index_r, 0, false, reverse);
7053 
7054 		    /* Build the head of the loop.  */
7055 		    do_pending_stack_adjust ();
7056 		    emit_label (loop_start);
7057 
7058 		    /* Assign value to element index.  */
7059 		    position =
7060 		      fold_convert (ssizetype,
7061 				    fold_build2 (MINUS_EXPR,
7062 						 TREE_TYPE (index),
7063 						 index,
7064 						 TYPE_MIN_VALUE (domain)));
7065 
7066 		    position =
7067 			size_binop (MULT_EXPR, position,
7068 				    fold_convert (ssizetype,
7069 						  TYPE_SIZE_UNIT (elttype)));
7070 
7071 		    pos_rtx = expand_normal (position);
7072 		    xtarget = offset_address (target, pos_rtx,
7073 					      highest_pow2_factor (position));
7074 		    xtarget = adjust_address (xtarget, mode, 0);
7075 		    if (TREE_CODE (value) == CONSTRUCTOR)
7076 		      store_constructor (value, xtarget, cleared,
7077 					 exact_div (bitsize, BITS_PER_UNIT),
7078 					 reverse);
7079 		    else
7080 		      store_expr (value, xtarget, 0, false, reverse);
7081 
7082 		    /* Generate a conditional jump to exit the loop.  */
7083 		    exit_cond = build2 (LT_EXPR, integer_type_node,
7084 					index, hi_index);
7085 		    jumpif (exit_cond, loop_end,
7086 			    profile_probability::uninitialized ());
7087 
7088 		    /* Update the loop counter, and jump to the head of
7089 		       the loop.  */
7090 		    expand_assignment (index,
7091 				       build2 (PLUS_EXPR, TREE_TYPE (index),
7092 					       index, integer_one_node),
7093 				       false);
7094 
7095 		    emit_jump (loop_start);
7096 
7097 		    /* Build the end of the loop.  */
7098 		    emit_label (loop_end);
7099 		  }
7100 	      }
7101 	    else if ((index != 0 && ! tree_fits_shwi_p (index))
7102 		     || ! tree_fits_uhwi_p (TYPE_SIZE (elttype)))
7103 	      {
7104 		tree position;
7105 
7106 		if (index == 0)
7107 		  index = ssize_int (1);
7108 
7109 		if (minelt)
7110 		  index = fold_convert (ssizetype,
7111 					fold_build2 (MINUS_EXPR,
7112 						     TREE_TYPE (index),
7113 						     index,
7114 						     TYPE_MIN_VALUE (domain)));
7115 
7116 		position =
7117 		  size_binop (MULT_EXPR, index,
7118 			      fold_convert (ssizetype,
7119 					    TYPE_SIZE_UNIT (elttype)));
7120 		xtarget = offset_address (target,
7121 					  expand_normal (position),
7122 					  highest_pow2_factor (position));
7123 		xtarget = adjust_address (xtarget, mode, 0);
7124 		store_expr (value, xtarget, 0, false, reverse);
7125 	      }
7126 	    else
7127 	      {
7128 		if (index != 0)
7129 		  bitpos = ((tree_to_shwi (index) - minelt)
7130 			    * tree_to_uhwi (TYPE_SIZE (elttype)));
7131 		else
7132 		  bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype)));
7133 
7134 		if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
7135 		    && TREE_CODE (type) == ARRAY_TYPE
7136 		    && TYPE_NONALIASED_COMPONENT (type))
7137 		  {
7138 		    target = copy_rtx (target);
7139 		    MEM_KEEP_ALIAS_SET_P (target) = 1;
7140 		  }
7141 		store_constructor_field (target, bitsize, bitpos, 0,
7142 					 bitregion_end, mode, value,
7143 					 cleared, get_alias_set (elttype),
7144 					 reverse);
7145 	      }
7146 	  }
7147 	break;
7148       }
7149 
7150     case VECTOR_TYPE:
7151       {
7152 	unsigned HOST_WIDE_INT idx;
7153 	constructor_elt *ce;
7154 	int i;
7155 	int need_to_clear;
7156 	insn_code icode = CODE_FOR_nothing;
7157 	tree elt;
7158 	tree elttype = TREE_TYPE (type);
7159 	int elt_size = vector_element_bits (type);
7160 	machine_mode eltmode = TYPE_MODE (elttype);
7161 	HOST_WIDE_INT bitsize;
7162 	HOST_WIDE_INT bitpos;
7163 	rtvec vector = NULL;
7164 	poly_uint64 n_elts;
7165 	unsigned HOST_WIDE_INT const_n_elts;
7166 	alias_set_type alias;
7167 	bool vec_vec_init_p = false;
7168 	machine_mode mode = GET_MODE (target);
7169 
7170 	gcc_assert (eltmode != BLKmode);
7171 
7172 	/* Try using vec_duplicate_optab for uniform vectors.  */
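	/* E.g. a CONSTRUCTOR whose elements are all the same scalar X can
	   often be expanded as a single vec_duplicate of X rather than as
	   element-by-element stores, provided the target implements the
	   optab for this vector mode.  */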
7173 	if (!TREE_SIDE_EFFECTS (exp)
7174 	    && VECTOR_MODE_P (mode)
7175 	    && eltmode == GET_MODE_INNER (mode)
7176 	    && ((icode = optab_handler (vec_duplicate_optab, mode))
7177 		!= CODE_FOR_nothing)
7178 	    && (elt = uniform_vector_p (exp))
7179 	    && !VECTOR_TYPE_P (TREE_TYPE (elt)))
7180 	  {
7181 	    class expand_operand ops[2];
7182 	    create_output_operand (&ops[0], target, mode);
7183 	    create_input_operand (&ops[1], expand_normal (elt), eltmode);
7184 	    expand_insn (icode, 2, ops);
7185 	    if (!rtx_equal_p (target, ops[0].value))
7186 	      emit_move_insn (target, ops[0].value);
7187 	    break;
7188 	  }
7189 
7190 	n_elts = TYPE_VECTOR_SUBPARTS (type);
7191 	if (REG_P (target)
7192 	    && VECTOR_MODE_P (mode)
7193 	    && n_elts.is_constant (&const_n_elts))
7194 	  {
7195 	    machine_mode emode = eltmode;
7196 	    bool vector_typed_elts_p = false;
7197 
7198 	    if (CONSTRUCTOR_NELTS (exp)
7199 		&& (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value))
7200 		    == VECTOR_TYPE))
7201 	      {
7202 		tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
7203 		gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
7204 				      * TYPE_VECTOR_SUBPARTS (etype),
7205 				      n_elts));
7206 		emode = TYPE_MODE (etype);
7207 		vector_typed_elts_p = true;
7208 	      }
7209 	    icode = convert_optab_handler (vec_init_optab, mode, emode);
7210 	    if (icode != CODE_FOR_nothing)
7211 	      {
7212 		unsigned int n = const_n_elts;
7213 
7214 		if (vector_typed_elts_p)
7215 		  {
7216 		    n = CONSTRUCTOR_NELTS (exp);
7217 		    vec_vec_init_p = true;
7218 		  }
7219 		vector = rtvec_alloc (n);
7220 		for (unsigned int k = 0; k < n; k++)
7221 		  RTVEC_ELT (vector, k) = CONST0_RTX (emode);
7222 	      }
7223 	  }
7224 
7225 	/* Compute the size of the elements in the CTOR.  It differs
7226 	   from the size of the vector type elements only when the
7227 	   CTOR elements are vectors themselves.  */
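	/* E.g. a V8HI constructor built from two V4HI values has a CTOR
	   element size of 64 bits even though the vector element size is
	   only 16 bits.  */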
7228 	tree val_type = (CONSTRUCTOR_NELTS (exp) != 0
7229 			 ? TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value)
7230 			 : elttype);
7231 	if (VECTOR_TYPE_P (val_type))
7232 	  bitsize = tree_to_uhwi (TYPE_SIZE (val_type));
7233 	else
7234 	  bitsize = elt_size;
7235 
7236 	/* If the constructor has fewer elements than the vector,
7237 	   clear the whole vector first.  Similarly if this is a static
7238 	   constructor of a non-BLKmode object.  */
7239 	if (cleared)
7240 	  need_to_clear = 0;
7241 	else if (REG_P (target) && TREE_STATIC (exp))
7242 	  need_to_clear = 1;
7243 	else
7244 	  {
7245 	    unsigned HOST_WIDE_INT count = 0, zero_count = 0;
7246 	    tree value;
7247 
7248 	    FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
7249 	      {
7250 		int n_elts_here = bitsize / elt_size;
7251 		count += n_elts_here;
7252 		if (mostly_zeros_p (value))
7253 		  zero_count += n_elts_here;
7254 	      }
7255 
7256 	    /* Clear the entire vector first if there are any missing elements,
7257 	       or if the incidence of zero elements is >= 75%.  */
7258 	    need_to_clear = (maybe_lt (count, n_elts)
7259 			     || 4 * zero_count >= 3 * count);
7260 	  }
7261 
7262 	if (need_to_clear && maybe_gt (size, 0) && !vector)
7263 	  {
7264 	    if (REG_P (target))
7265 	      emit_move_insn (target, CONST0_RTX (mode));
7266 	    else
7267 	      clear_storage (target, gen_int_mode (size, Pmode),
7268 			     BLOCK_OP_NORMAL);
7269 	    cleared = 1;
7270 	  }
7271 
7272 	/* Inform later passes that the old value is dead.  */
7273 	if (!cleared && !vector && REG_P (target))
7274 	  emit_move_insn (target, CONST0_RTX (mode));
7275 
7276         if (MEM_P (target))
7277 	  alias = MEM_ALIAS_SET (target);
7278 	else
7279 	  alias = get_alias_set (elttype);
7280 
7281         /* Store each element of the constructor into the corresponding
7282 	   element of TARGET, determined by counting the elements.  */
7283 	for (idx = 0, i = 0;
7284 	     vec_safe_iterate (CONSTRUCTOR_ELTS (exp), idx, &ce);
7285 	     idx++, i += bitsize / elt_size)
7286 	  {
7287 	    HOST_WIDE_INT eltpos;
7288 	    tree value = ce->value;
7289 
7290 	    if (cleared && initializer_zerop (value))
7291 	      continue;
7292 
7293 	    if (ce->index)
7294 	      eltpos = tree_to_uhwi (ce->index);
7295 	    else
7296 	      eltpos = i;
7297 
7298 	    if (vector)
7299 	      {
7300 		if (vec_vec_init_p)
7301 		  {
7302 		    gcc_assert (ce->index == NULL_TREE);
7303 		    gcc_assert (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE);
7304 		    eltpos = idx;
7305 		  }
7306 		else
7307 		  gcc_assert (TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE);
7308 		RTVEC_ELT (vector, eltpos) = expand_normal (value);
7309 	      }
7310 	    else
7311 	      {
7312 		machine_mode value_mode
7313 		  = (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE
7314 		     ? TYPE_MODE (TREE_TYPE (value)) : eltmode);
7315 		bitpos = eltpos * elt_size;
7316 		store_constructor_field (target, bitsize, bitpos, 0,
7317 					 bitregion_end, value_mode,
7318 					 value, cleared, alias, reverse);
7319 	      }
7320 	  }
7321 
7322 	if (vector)
7323 	  emit_insn (GEN_FCN (icode) (target,
7324 				      gen_rtx_PARALLEL (mode, vector)));
7325 	break;
7326       }
7327 
7328     default:
7329       gcc_unreachable ();
7330     }
7331 }
7332 
7333 /* Store the value of EXP (an expression tree)
7334    into a subfield of TARGET which has mode MODE and occupies
7335    BITSIZE bits, starting BITPOS bits from the start of TARGET.
7336    If MODE is VOIDmode, it means that we are storing into a bit-field.
7337 
7338    BITREGION_START is bitpos of the first bitfield in this region.
7339    BITREGION_END is the bitpos of the ending bitfield in this region.
7340    These two fields are 0, if the C++ memory model does not apply,
7341    or we are not interested in keeping track of bitfield regions.
7342 
7343    Always return const0_rtx unless we have something particular to
7344    return.
7345 
7346    ALIAS_SET is the alias set for the destination.  This value will
7347    (in general) be different from that for TARGET, since TARGET is a
7348    reference to the containing structure.
7349 
7350    If NONTEMPORAL is true, try generating a nontemporal store.
7351 
7352    If REVERSE is true, the store is to be done in reverse order.  */
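/* A typical use is the expansion of a bit-field assignment: e.g. a store
   into a field declared as "unsigned f : 3" arrives here with MODE equal to
   VOIDmode and BITSIZE equal to 3, and is handled via store_bit_field.  */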
7353 
7354 static rtx
7355 store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
7356 	     poly_uint64 bitregion_start, poly_uint64 bitregion_end,
7357 	     machine_mode mode, tree exp,
7358 	     alias_set_type alias_set, bool nontemporal,  bool reverse)
7359 {
7360   if (TREE_CODE (exp) == ERROR_MARK)
7361     return const0_rtx;
7362 
7363   /* If we have nothing to store, do nothing unless the expression has
7364      side-effects.  Don't do that for zero sized addressable lhs of
7365      calls.  */
7366   if (known_eq (bitsize, 0)
7367       && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
7368 	  || TREE_CODE (exp) != CALL_EXPR))
7369     return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
7370 
7371   if (GET_CODE (target) == CONCAT)
7372     {
7373       /* We're storing into a struct containing a single __complex.  */
7374 
7375       gcc_assert (known_eq (bitpos, 0));
7376       return store_expr (exp, target, 0, nontemporal, reverse);
7377     }
7378 
7379   /* If the structure is in a register or if the component
7380      is a bit field, we cannot use addressing to access it.
7381      Use bit-field techniques or SUBREG to store in it.  */
7382 
7383   poly_int64 decl_bitsize;
7384   if (mode == VOIDmode
7385       || (mode != BLKmode && ! direct_store[(int) mode]
7386 	  && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
7387 	  && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
7388       || REG_P (target)
7389       || GET_CODE (target) == SUBREG
7390       /* If the field isn't aligned enough to store as an ordinary memref,
7391 	 store it as a bit field.  */
7392       || (mode != BLKmode
7393 	  && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
7394 		|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
7395 	       && targetm.slow_unaligned_access (mode, MEM_ALIGN (target)))
7396 	      || !multiple_p (bitpos, BITS_PER_UNIT)))
7397       || (known_size_p (bitsize)
7398 	  && mode != BLKmode
7399 	  && maybe_gt (GET_MODE_BITSIZE (mode), bitsize))
7400       /* If the RHS and field are a constant size and the size of the
7401 	 RHS isn't the same size as the bitfield, we must use bitfield
7402 	 operations.  */
7403       || (known_size_p (bitsize)
7404 	  && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
7405 	  && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
7406 		       bitsize)
7407 	  /* Except for initialization of full bytes from a CONSTRUCTOR, which
7408 	     we will handle specially below.  */
7409 	  && !(TREE_CODE (exp) == CONSTRUCTOR
7410 	       && multiple_p (bitsize, BITS_PER_UNIT))
7411 	  /* And except for bitwise copying of TREE_ADDRESSABLE types,
7412 	     where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
7413 	     includes some extra padding.  store_expr / expand_expr will in
7414 	     that case call get_inner_reference that will have the bitsize
7415 	     we check here and thus the block move will not clobber the
7416 	     padding that shouldn't be clobbered.  In the future we could
7417 	     replace the TREE_ADDRESSABLE check with a check that
7418 	     get_base_address needs to live in memory.  */
7419 	  && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
7420 	      || TREE_CODE (exp) != COMPONENT_REF
7421 	      || !multiple_p (bitsize, BITS_PER_UNIT)
7422 	      || !multiple_p (bitpos, BITS_PER_UNIT)
7423 	      || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp, 1)),
7424 				   &decl_bitsize)
7425 	      || maybe_ne (decl_bitsize, bitsize))
7426 	  /* A call with an addressable return type and return-slot
7427 	     optimization must not need bitfield operations but we must
7428 	     pass down the original target.  */
7429 	  && (TREE_CODE (exp) != CALL_EXPR
7430 	      || !TREE_ADDRESSABLE (TREE_TYPE (exp))
7431 	      || !CALL_EXPR_RETURN_SLOT_OPT (exp)))
7432       /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
7433          decl we must use bitfield operations.  */
7434       || (known_size_p (bitsize)
7435 	  && TREE_CODE (exp) == MEM_REF
7436 	  && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
7437 	  && DECL_P (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
7438 	  && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
7439 	  && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != BLKmode))
7440     {
7441       rtx temp;
7442       gimple *nop_def;
7443 
7444       /* If EXP is a NOP_EXPR of precision less than its mode, then that
7445 	 implies a mask operation.  If the precision is the same size as
7446 	 the field we're storing into, that mask is redundant.  This is
7447 	 particularly common with bit field assignments generated by the
7448 	 C front end.  */
7449       nop_def = get_def_for_expr (exp, NOP_EXPR);
7450       if (nop_def)
7451 	{
7452 	  tree type = TREE_TYPE (exp);
7453 	  if (INTEGRAL_TYPE_P (type)
7454 	      && maybe_ne (TYPE_PRECISION (type),
7455 			   GET_MODE_BITSIZE (TYPE_MODE (type)))
7456 	      && known_eq (bitsize, TYPE_PRECISION (type)))
7457 	    {
7458 	      tree op = gimple_assign_rhs1 (nop_def);
7459 	      type = TREE_TYPE (op);
7460 	      if (INTEGRAL_TYPE_P (type)
7461 		  && known_ge (TYPE_PRECISION (type), bitsize))
7462 		exp = op;
7463 	    }
7464 	}
7465 
7466       temp = expand_normal (exp);
7467 
7468       /* We don't support variable-sized BLKmode bitfields, since our
7469 	 handling of BLKmode is bound up with the ability to break
7470 	 things into words.  */
7471       gcc_assert (mode != BLKmode || bitsize.is_constant ());
7472 
7473       /* Handle calls that return values in multiple non-contiguous locations.
7474 	 The Irix 6 ABI has examples of this.  */
7475       if (GET_CODE (temp) == PARALLEL)
7476 	{
7477 	  HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7478 	  machine_mode temp_mode = GET_MODE (temp);
7479 	  if (temp_mode == BLKmode || temp_mode == VOIDmode)
7480 	    temp_mode = smallest_int_mode_for_size (size * BITS_PER_UNIT);
7481 	  rtx temp_target = gen_reg_rtx (temp_mode);
7482 	  emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
7483 	  temp = temp_target;
7484 	}
7485 
7486       /* Handle calls that return BLKmode values in registers.  */
7487       else if (mode == BLKmode && REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
7488 	{
7489 	  rtx temp_target = gen_reg_rtx (GET_MODE (temp));
7490 	  copy_blkmode_from_reg (temp_target, temp, TREE_TYPE (exp));
7491 	  temp = temp_target;
7492 	}
7493 
7494       /* If the value has aggregate type and an integral mode then, if BITSIZE
7495 	 is narrower than this mode and this is for big-endian data, we first
7496 	 need to put the value into the low-order bits for store_bit_field,
7497 	 except when MODE is BLKmode and BITSIZE is larger than the word size
7498 	 (see the handling of fields larger than a word in store_bit_field).
7499 	 Moreover, the field may not be aligned on a byte boundary; in this
7500 	 case, if it has reverse storage order, it needs to be accessed as a
7501 	 scalar field with reverse storage order and we must first put the
7502 	 value into target order.  */
7503       scalar_int_mode temp_mode;
7504       if (AGGREGATE_TYPE_P (TREE_TYPE (exp))
7505 	  && is_int_mode (GET_MODE (temp), &temp_mode))
7506 	{
7507 	  HOST_WIDE_INT size = GET_MODE_BITSIZE (temp_mode);
7508 
7509 	  reverse = TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp));
7510 
7511 	  if (reverse)
7512 	    temp = flip_storage_order (temp_mode, temp);
7513 
7514 	  gcc_checking_assert (known_le (bitsize, size));
7515 	  if (maybe_lt (bitsize, size)
7516 	      && reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN
7517 	      /* Use of to_constant for BLKmode was checked above.  */
7518 	      && !(mode == BLKmode && bitsize.to_constant () > BITS_PER_WORD))
7519 	    temp = expand_shift (RSHIFT_EXPR, temp_mode, temp,
7520 				 size - bitsize, NULL_RTX, 1);
7521 	}
7522 
7523       /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE.  */
7524       if (mode != VOIDmode && mode != BLKmode
7525 	  && mode != TYPE_MODE (TREE_TYPE (exp)))
7526 	temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
7527 
7528       /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
7529 	 and BITPOS must be aligned on a byte boundary.  If so, we simply do
7530 	 a block copy.  Likewise for a BLKmode-like TARGET.  */
7531       if (GET_MODE (temp) == BLKmode
7532 	  && (GET_MODE (target) == BLKmode
7533 	      || (MEM_P (target)
7534 		  && GET_MODE_CLASS (GET_MODE (target)) == MODE_INT
7535 		  && multiple_p (bitpos, BITS_PER_UNIT)
7536 		  && multiple_p (bitsize, BITS_PER_UNIT))))
7537 	{
7538 	  gcc_assert (MEM_P (target) && MEM_P (temp));
7539 	  poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
7540 	  poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
7541 
7542 	  target = adjust_address (target, VOIDmode, bytepos);
7543 	  emit_block_move (target, temp,
7544 			   gen_int_mode (bytesize, Pmode),
7545 			   BLOCK_OP_NORMAL);
7546 
7547 	  return const0_rtx;
7548 	}
7549 
7550       /* If the mode of TEMP is still BLKmode and BITSIZE is not larger than the
7551 	 word size, we need to load the value (see again store_bit_field).  */
7552       if (GET_MODE (temp) == BLKmode && known_le (bitsize, BITS_PER_WORD))
7553 	{
7554 	  temp_mode = smallest_int_mode_for_size (bitsize);
7555 	  temp = extract_bit_field (temp, bitsize, 0, 1, NULL_RTX, temp_mode,
7556 				    temp_mode, false, NULL);
7557 	}
7558 
7559       /* Store the value in the bitfield.  */
7560       gcc_checking_assert (known_ge (bitpos, 0));
7561       store_bit_field (target, bitsize, bitpos,
7562 		       bitregion_start, bitregion_end,
7563 		       mode, temp, reverse);
7564 
7565       return const0_rtx;
7566     }
7567   else
7568     {
7569       /* Now build a reference to just the desired component.  */
7570       rtx to_rtx = adjust_address (target, mode,
7571 				   exact_div (bitpos, BITS_PER_UNIT));
7572 
7573       if (to_rtx == target)
7574 	to_rtx = copy_rtx (to_rtx);
7575 
7576       if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
7577 	set_mem_alias_set (to_rtx, alias_set);
7578 
7579       /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
7580 	 into a target smaller than its type; handle that case now.  */
7581       if (TREE_CODE (exp) == CONSTRUCTOR && known_size_p (bitsize))
7582 	{
7583 	  poly_int64 bytesize = exact_div (bitsize, BITS_PER_UNIT);
7584 	  store_constructor (exp, to_rtx, 0, bytesize, reverse);
7585 	  return to_rtx;
7586 	}
7587 
7588       return store_expr (exp, to_rtx, 0, nontemporal, reverse);
7589     }
7590 }
7591 
7592 /* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
7593    an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
7594    codes and find the ultimate containing object, which we return.
7595 
7596    We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
7597    bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
7598    storage order of the field.
7599    If the position of the field is variable, we store a tree
7600    giving the variable offset (in units) in *POFFSET.
7601    This offset is in addition to the bit position.
7602    If the position is not variable, we store 0 in *POFFSET.
7603 
7604    If any of the extraction expressions is volatile,
7605    we store 1 in *PVOLATILEP.  Otherwise we don't change that.
7606 
7607    If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
7608    Otherwise, it is a mode that can be used to access the field.
7609 
7610    If the field describes a variable-sized object, *PMODE is set to
7611    BLKmode and *PBITSIZE is set to -1.  An access cannot be made in
7612    this case, but the address of the object can be found.  */
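/* For instance, for an access like a.b[i].c the ultimate containing object
   returned is A; the constant part of the position of C ends up in *PBITPOS,
   and the variable part contributed by the index I ends up in *POFFSET.  */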
7613 
7614 tree
7615 get_inner_reference (tree exp, poly_int64_pod *pbitsize,
7616 		     poly_int64_pod *pbitpos, tree *poffset,
7617 		     machine_mode *pmode, int *punsignedp,
7618 		     int *preversep, int *pvolatilep)
7619 {
7620   tree size_tree = 0;
7621   machine_mode mode = VOIDmode;
7622   bool blkmode_bitfield = false;
7623   tree offset = size_zero_node;
7624   poly_offset_int bit_offset = 0;
7625 
7626   /* First get the mode, signedness, storage order and size.  We do this from
7627      just the outermost expression.  */
7628   *pbitsize = -1;
7629   if (TREE_CODE (exp) == COMPONENT_REF)
7630     {
7631       tree field = TREE_OPERAND (exp, 1);
7632       size_tree = DECL_SIZE (field);
7633       if (flag_strict_volatile_bitfields > 0
7634 	  && TREE_THIS_VOLATILE (exp)
7635 	  && DECL_BIT_FIELD_TYPE (field)
7636 	  && DECL_MODE (field) != BLKmode)
7637 	/* Volatile bitfields should be accessed in the mode of the
7638 	     field's type, not the mode computed based on the bit
7639 	     size.  */
7640 	mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
7641       else if (!DECL_BIT_FIELD (field))
7642 	{
7643 	  mode = DECL_MODE (field);
7644 	  /* For vector fields re-check the target flags, as DECL_MODE
7645 	     could have been set with different target flags than
7646 	     the current function has.  */
7647 	  if (VECTOR_TYPE_P (TREE_TYPE (field))
7648 	      && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field))))
7649 	    mode = TYPE_MODE (TREE_TYPE (field));
7650 	}
7651       else if (DECL_MODE (field) == BLKmode)
7652 	blkmode_bitfield = true;
7653 
7654       *punsignedp = DECL_UNSIGNED (field);
7655     }
7656   else if (TREE_CODE (exp) == BIT_FIELD_REF)
7657     {
7658       size_tree = TREE_OPERAND (exp, 1);
7659       *punsignedp = (! INTEGRAL_TYPE_P (TREE_TYPE (exp))
7660 		     || TYPE_UNSIGNED (TREE_TYPE (exp)));
7661 
7662       /* For element accesses that match the vector's element type and size,
7663          and for vector-typed accesses, use the mode of the access type.  */
7664       if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == VECTOR_TYPE
7665 	   && TREE_TYPE (exp) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))
7666 	   && tree_int_cst_equal (size_tree, TYPE_SIZE (TREE_TYPE (exp))))
7667 	  || VECTOR_TYPE_P (TREE_TYPE (exp)))
7668 	mode = TYPE_MODE (TREE_TYPE (exp));
7669     }
7670   else
7671     {
7672       mode = TYPE_MODE (TREE_TYPE (exp));
7673       *punsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
7674 
7675       if (mode == BLKmode)
7676 	size_tree = TYPE_SIZE (TREE_TYPE (exp));
7677       else
7678 	*pbitsize = GET_MODE_BITSIZE (mode);
7679     }
7680 
7681   if (size_tree != 0)
7682     {
7683       if (! tree_fits_uhwi_p (size_tree))
7684 	mode = BLKmode, *pbitsize = -1;
7685       else
7686 	*pbitsize = tree_to_uhwi (size_tree);
7687     }
7688 
7689   *preversep = reverse_storage_order_for_component_p (exp);
7690 
7691   /* Compute cumulative bit-offset for nested component-refs and array-refs,
7692      and find the ultimate containing object.  */
7693   while (1)
7694     {
7695       switch (TREE_CODE (exp))
7696 	{
7697 	case BIT_FIELD_REF:
7698 	  bit_offset += wi::to_poly_offset (TREE_OPERAND (exp, 2));
7699 	  break;
7700 
7701 	case COMPONENT_REF:
7702 	  {
7703 	    tree field = TREE_OPERAND (exp, 1);
7704 	    tree this_offset = component_ref_field_offset (exp);
7705 
7706 	    /* If this field hasn't been filled in yet, don't go past it.
7707 	       This should only happen when folding expressions made during
7708 	       type construction.  */
7709 	    if (this_offset == 0)
7710 	      break;
7711 
7712 	    offset = size_binop (PLUS_EXPR, offset, this_offset);
7713 	    bit_offset += wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field));
7714 
7715 	    /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN.  */
7716 	  }
7717 	  break;
7718 
7719 	case ARRAY_REF:
7720 	case ARRAY_RANGE_REF:
7721 	  {
7722 	    tree index = TREE_OPERAND (exp, 1);
7723 	    tree low_bound = array_ref_low_bound (exp);
7724 	    tree unit_size = array_ref_element_size (exp);
7725 
7726 	    /* We assume all arrays have sizes that are a multiple of a byte.
7727 	       First subtract the lower bound, if any, in the type of the
7728 	       index, then convert to sizetype and multiply by the size of
7729 	       the array element.  */
7730 	    if (! integer_zerop (low_bound))
7731 	      index = fold_build2 (MINUS_EXPR, TREE_TYPE (index),
7732 				   index, low_bound);
7733 
7734 	    offset = size_binop (PLUS_EXPR, offset,
7735 			         size_binop (MULT_EXPR,
7736 					     fold_convert (sizetype, index),
7737 					     unit_size));
7738 	  }
7739 	  break;
7740 
7741 	case REALPART_EXPR:
7742 	  break;
7743 
7744 	case IMAGPART_EXPR:
7745 	  bit_offset += *pbitsize;
7746 	  break;
7747 
7748 	case VIEW_CONVERT_EXPR:
7749 	  break;
7750 
7751 	case MEM_REF:
7752 	  /* Hand back the decl for MEM[&decl, off].  */
7753 	  if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR)
7754 	    {
7755 	      tree off = TREE_OPERAND (exp, 1);
7756 	      if (!integer_zerop (off))
7757 		{
7758 		  poly_offset_int boff = mem_ref_offset (exp);
7759 		  boff <<= LOG2_BITS_PER_UNIT;
7760 		  bit_offset += boff;
7761 		}
7762 	      exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7763 	    }
7764 	  goto done;
7765 
7766 	default:
7767 	  goto done;
7768 	}
7769 
7770       /* If any reference in the chain is volatile, the effect is volatile.  */
7771       if (TREE_THIS_VOLATILE (exp))
7772 	*pvolatilep = 1;
7773 
7774       exp = TREE_OPERAND (exp, 0);
7775     }
7776  done:
7777 
7778   /* If OFFSET is constant, see if we can return the whole thing as a
7779      constant bit position.  Make sure to handle overflow during
7780      this conversion.  */
7781   if (poly_int_tree_p (offset))
7782     {
7783       poly_offset_int tem = wi::sext (wi::to_poly_offset (offset),
7784 				      TYPE_PRECISION (sizetype));
7785       tem <<= LOG2_BITS_PER_UNIT;
7786       tem += bit_offset;
7787       if (tem.to_shwi (pbitpos))
7788 	*poffset = offset = NULL_TREE;
7789     }
7790 
7791   /* Otherwise, split it up.  */
7792   if (offset)
7793     {
7794       /* Avoid returning a negative bitpos as this may wreak havoc later.  */
7795       if (!bit_offset.to_shwi (pbitpos) || maybe_lt (*pbitpos, 0))
7796         {
7797 	  *pbitpos = num_trailing_bits (bit_offset.force_shwi ());
7798 	  poly_offset_int bytes = bits_to_bytes_round_down (bit_offset);
7799 	  offset = size_binop (PLUS_EXPR, offset,
7800 			       build_int_cst (sizetype, bytes.force_shwi ()));
7801 	}
7802 
7803       *poffset = offset;
7804     }
7805 
7806   /* We can use BLKmode for a byte-aligned BLKmode bitfield.  */
7807   if (mode == VOIDmode
7808       && blkmode_bitfield
7809       && multiple_p (*pbitpos, BITS_PER_UNIT)
7810       && multiple_p (*pbitsize, BITS_PER_UNIT))
7811     *pmode = BLKmode;
7812   else
7813     *pmode = mode;
7814 
7815   return exp;
7816 }
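
/* For illustration, a typical caller (compare expand_expr_addr_expr_1 below)
   decomposes a reference roughly like this, where EXP is the reference tree
   and the locals are the caller's own:

     poly_int64 bitsize, bitpos;
     tree offset;
     machine_mode mode1;
     int unsignedp, reversep, volatilep = 0;
     tree base = get_inner_reference (exp, &bitsize, &bitpos, &offset,
				      &mode1, &unsignedp, &reversep,
				      &volatilep);

   For S.F with F a non-bit-field int member at byte offset 4 (on a
   byte-addressed target), BASE is the decl for S, *PBITPOS is 32, *PBITSIZE
   is the bit size of int, *POFFSET is NULL_TREE because the position is
   constant, and *PMODE is the field's mode.  */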
7817 
7818 /* Alignment in bits the TARGET of an assignment may be assumed to have.  */
7819 
7820 static unsigned HOST_WIDE_INT
7821 target_align (const_tree target)
7822 {
7823   /* We might have a chain of nested references with intermediate misaligning
7824      bitfields components, so need to recurse to find out.  */
7825 
7826   unsigned HOST_WIDE_INT this_align, outer_align;
7827 
7828   switch (TREE_CODE (target))
7829     {
7830     case BIT_FIELD_REF:
7831       return 1;
7832 
7833     case COMPONENT_REF:
7834       this_align = DECL_ALIGN (TREE_OPERAND (target, 1));
7835       outer_align = target_align (TREE_OPERAND (target, 0));
7836       return MIN (this_align, outer_align);
7837 
7838     case ARRAY_REF:
7839     case ARRAY_RANGE_REF:
7840       this_align = TYPE_ALIGN (TREE_TYPE (target));
7841       outer_align = target_align (TREE_OPERAND (target, 0));
7842       return MIN (this_align, outer_align);
7843 
7844     CASE_CONVERT:
7845     case NON_LVALUE_EXPR:
7846     case VIEW_CONVERT_EXPR:
7847       this_align = TYPE_ALIGN (TREE_TYPE (target));
7848       outer_align = target_align (TREE_OPERAND (target, 0));
7849       return MAX (this_align, outer_align);
7850 
7851     default:
7852       return TYPE_ALIGN (TREE_TYPE (target));
7853     }
7854 }
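
/* As a worked example, for a store target like S.ARR[I] the result is the
   minimum of TYPE_ALIGN of the element type (from the ARRAY_REF),
   DECL_ALIGN (ARR) (from the COMPONENT_REF) and TYPE_ALIGN of S's type, so
   only that much alignment may be assumed.  An outermost BIT_FIELD_REF
   yields the conservative answer of 1 bit.  */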
7855 
7856 
7857 /* Given an rtx VALUE that may contain additions and multiplications, return
7858    an equivalent value that just refers to a register, memory, or constant.
7859    This is done by generating instructions to perform the arithmetic and
7860    returning a pseudo-register containing the value.
7861 
7862    The returned value may be a REG, SUBREG, MEM or constant.  */
7863 
7864 rtx
7865 force_operand (rtx value, rtx target)
7866 {
7867   rtx op1, op2;
7868   /* Use subtarget as the target for operand 0 of a binary operation.  */
7869   rtx subtarget = get_subtarget (target);
7870   enum rtx_code code = GET_CODE (value);
7871 
7872   /* Check for subreg applied to an expression produced by loop optimizer.  */
7873   if (code == SUBREG
7874       && !REG_P (SUBREG_REG (value))
7875       && !MEM_P (SUBREG_REG (value)))
7876     {
7877       value
7878 	= simplify_gen_subreg (GET_MODE (value),
7879 			       force_reg (GET_MODE (SUBREG_REG (value)),
7880 					  force_operand (SUBREG_REG (value),
7881 							 NULL_RTX)),
7882 			       GET_MODE (SUBREG_REG (value)),
7883 			       SUBREG_BYTE (value));
7884       code = GET_CODE (value);
7885     }
7886 
7887   /* Check for a PIC address load.  */
7888   if ((code == PLUS || code == MINUS)
7889       && XEXP (value, 0) == pic_offset_table_rtx
7890       && (GET_CODE (XEXP (value, 1)) == SYMBOL_REF
7891 	  || GET_CODE (XEXP (value, 1)) == LABEL_REF
7892 	  || GET_CODE (XEXP (value, 1)) == CONST))
7893     {
7894       if (!subtarget)
7895 	subtarget = gen_reg_rtx (GET_MODE (value));
7896       emit_move_insn (subtarget, value);
7897       return subtarget;
7898     }
7899 
7900   if (ARITHMETIC_P (value))
7901     {
7902       op2 = XEXP (value, 1);
7903       if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget))
7904 	subtarget = 0;
7905       if (code == MINUS && CONST_INT_P (op2))
7906 	{
7907 	  code = PLUS;
7908 	  op2 = negate_rtx (GET_MODE (value), op2);
7909 	}
7910 
7911       /* Check for an addition with OP2 a constant integer and our first
7912          operand a PLUS of a virtual register and something else.  In that
7913          case, we want to emit the sum of the virtual register and the
7914          constant first and then add the other value.  This allows virtual
7915          register instantiation to simply modify the constant rather than
7916          creating another one around this addition.  */
7917       if (code == PLUS && CONST_INT_P (op2)
7918 	  && GET_CODE (XEXP (value, 0)) == PLUS
7919 	  && REG_P (XEXP (XEXP (value, 0), 0))
7920 	  && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
7921 	  && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
7922 	{
7923 	  rtx temp = expand_simple_binop (GET_MODE (value), code,
7924 					  XEXP (XEXP (value, 0), 0), op2,
7925 					  subtarget, 0, OPTAB_LIB_WIDEN);
7926 	  return expand_simple_binop (GET_MODE (value), code, temp,
7927 				      force_operand (XEXP (XEXP (value,
7928 								 0), 1), 0),
7929 				      target, 0, OPTAB_LIB_WIDEN);
7930 	}
7931 
7932       op1 = force_operand (XEXP (value, 0), subtarget);
7933       op2 = force_operand (op2, NULL_RTX);
7934       switch (code)
7935 	{
7936 	case MULT:
7937 	  return expand_mult (GET_MODE (value), op1, op2, target, 1);
7938 	case DIV:
7939 	  if (!INTEGRAL_MODE_P (GET_MODE (value)))
7940 	    return expand_simple_binop (GET_MODE (value), code, op1, op2,
7941 					target, 1, OPTAB_LIB_WIDEN);
7942 	  else
7943 	    return expand_divmod (0,
7944 				  FLOAT_MODE_P (GET_MODE (value))
7945 				  ? RDIV_EXPR : TRUNC_DIV_EXPR,
7946 				  GET_MODE (value), op1, op2, target, 0);
7947 	case MOD:
7948 	  return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7949 				target, 0);
7950 	case UDIV:
7951 	  return expand_divmod (0, TRUNC_DIV_EXPR, GET_MODE (value), op1, op2,
7952 				target, 1);
7953 	case UMOD:
7954 	  return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7955 				target, 1);
7956 	case ASHIFTRT:
7957 	  return expand_simple_binop (GET_MODE (value), code, op1, op2,
7958 				      target, 0, OPTAB_LIB_WIDEN);
7959 	default:
7960 	  return expand_simple_binop (GET_MODE (value), code, op1, op2,
7961 				      target, 1, OPTAB_LIB_WIDEN);
7962 	}
7963     }
7964   if (UNARY_P (value))
7965     {
7966       if (!target)
7967 	target = gen_reg_rtx (GET_MODE (value));
7968       op1 = force_operand (XEXP (value, 0), NULL_RTX);
7969       switch (code)
7970 	{
7971 	case ZERO_EXTEND:
7972 	case SIGN_EXTEND:
7973 	case TRUNCATE:
7974 	case FLOAT_EXTEND:
7975 	case FLOAT_TRUNCATE:
7976 	  convert_move (target, op1, code == ZERO_EXTEND);
7977 	  return target;
7978 
7979 	case FIX:
7980 	case UNSIGNED_FIX:
7981 	  expand_fix (target, op1, code == UNSIGNED_FIX);
7982 	  return target;
7983 
7984 	case FLOAT:
7985 	case UNSIGNED_FLOAT:
7986 	  expand_float (target, op1, code == UNSIGNED_FLOAT);
7987 	  return target;
7988 
7989 	default:
7990 	  return expand_simple_unop (GET_MODE (value), code, op1, target, 0);
7991 	}
7992     }
7993 
7994 #ifdef INSN_SCHEDULING
7995   /* On machines that have insn scheduling, we want all memory reference to be
7996      explicit, so we need to deal with such paradoxical SUBREGs.  */
7997   if (paradoxical_subreg_p (value) && MEM_P (SUBREG_REG (value)))
7998     value
7999       = simplify_gen_subreg (GET_MODE (value),
8000 			     force_reg (GET_MODE (SUBREG_REG (value)),
8001 					force_operand (SUBREG_REG (value),
8002 						       NULL_RTX)),
8003 			     GET_MODE (SUBREG_REG (value)),
8004 			     SUBREG_BYTE (value));
8005 #endif
8006 
8007   return value;
8008 }
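
/* For illustration, address arithmetic built up elsewhere can be flattened
   with force_operand before being handed to a recognizer; BASE_REG and
   INDEX_REG stand for arbitrary pseudos here:

     rtx addr = gen_rtx_PLUS (Pmode, base_reg,
			      gen_rtx_MULT (Pmode, index_reg, GEN_INT (4)));
     addr = force_operand (addr, NULL_RTX);

   Afterwards ADDR is (typically) a new pseudo holding base + index * 4,
   while a VALUE that is already a register, memory or constant is returned
   unchanged.  */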
8009 
8010 /* Subroutine of expand_expr: return nonzero iff there is no way that
8011    EXP can reference X, which is being modified.  TOP_P is nonzero if this
8012    call is going to be used to determine whether we need a temporary
8013    for EXP, as opposed to a recursive call to this function.
8014 
8015    It is always safe for this routine to return zero since it merely
8016    searches for optimization opportunities.  */
8017 
8018 int
8019 safe_from_p (const_rtx x, tree exp, int top_p)
8020 {
8021   rtx exp_rtl = 0;
8022   int i, nops;
8023 
8024   if (x == 0
8025       /* If EXP has varying size, we MUST use a target since we currently
8026 	 have no way of allocating temporaries of variable size
8027 	 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
8028 	 So we assume here that something at a higher level has prevented a
8029 	 clash.  This is somewhat bogus, but the best we can do.  Only
8030 	 do this when X is BLKmode and when we are at the top level.  */
8031       || (top_p && TREE_TYPE (exp) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp))
8032 	  && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
8033 	  && (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
8034 	      || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE
8035 	      || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)))
8036 	      != INTEGER_CST)
8037 	  && GET_MODE (x) == BLKmode)
8038       /* If X is in the outgoing argument area, it is always safe.  */
8039       || (MEM_P (x)
8040 	  && (XEXP (x, 0) == virtual_outgoing_args_rtx
8041 	      || (GET_CODE (XEXP (x, 0)) == PLUS
8042 		  && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
8043     return 1;
8044 
8045   /* If this is a subreg of a hard register, declare it unsafe, otherwise,
8046      find the underlying pseudo.  */
8047   if (GET_CODE (x) == SUBREG)
8048     {
8049       x = SUBREG_REG (x);
8050       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
8051 	return 0;
8052     }
8053 
8054   /* Now look at our tree code and possibly recurse.  */
8055   switch (TREE_CODE_CLASS (TREE_CODE (exp)))
8056     {
8057     case tcc_declaration:
8058       exp_rtl = DECL_RTL_IF_SET (exp);
8059       break;
8060 
8061     case tcc_constant:
8062       return 1;
8063 
8064     case tcc_exceptional:
8065       if (TREE_CODE (exp) == TREE_LIST)
8066 	{
8067 	  while (1)
8068 	    {
8069 	      if (TREE_VALUE (exp) && !safe_from_p (x, TREE_VALUE (exp), 0))
8070 		return 0;
8071 	      exp = TREE_CHAIN (exp);
8072 	      if (!exp)
8073 		return 1;
8074 	      if (TREE_CODE (exp) != TREE_LIST)
8075 		return safe_from_p (x, exp, 0);
8076 	    }
8077 	}
8078       else if (TREE_CODE (exp) == CONSTRUCTOR)
8079 	{
8080 	  constructor_elt *ce;
8081 	  unsigned HOST_WIDE_INT idx;
8082 
8083 	  FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp), idx, ce)
8084 	    if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
8085 		|| !safe_from_p (x, ce->value, 0))
8086 	      return 0;
8087 	  return 1;
8088 	}
8089       else if (TREE_CODE (exp) == ERROR_MARK)
8090 	return 1;	/* An already-visited SAVE_EXPR? */
8091       else
8092 	return 0;
8093 
8094     case tcc_statement:
8095       /* The only case we look at here is the DECL_INITIAL inside a
8096 	 DECL_EXPR.  */
8097       return (TREE_CODE (exp) != DECL_EXPR
8098 	      || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL
8099 	      || !DECL_INITIAL (DECL_EXPR_DECL (exp))
8100 	      || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
8101 
8102     case tcc_binary:
8103     case tcc_comparison:
8104       if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
8105 	return 0;
8106       /* Fall through.  */
8107 
8108     case tcc_unary:
8109       return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
8110 
8111     case tcc_expression:
8112     case tcc_reference:
8113     case tcc_vl_exp:
8114       /* Now do code-specific tests.  EXP_RTL is set to any rtx we find in
8115 	 the expression.  If it is set, we conflict iff we are that rtx or
8116 	 both are in memory.  Otherwise, we check all operands of the
8117 	 expression recursively.  */
8118 
8119       switch (TREE_CODE (exp))
8120 	{
8121 	case ADDR_EXPR:
8122 	  /* If the operand is static or we are static, we can't conflict.
8123 	     Likewise if we don't conflict with the operand at all.  */
8124 	  if (staticp (TREE_OPERAND (exp, 0))
8125 	      || TREE_STATIC (exp)
8126 	      || safe_from_p (x, TREE_OPERAND (exp, 0), 0))
8127 	    return 1;
8128 
8129 	  /* Otherwise, the only way this can conflict is if we are taking
8130 	     the address of a DECL whose address is part of X, which is
8131 	     very rare.  */
8132 	  exp = TREE_OPERAND (exp, 0);
8133 	  if (DECL_P (exp))
8134 	    {
8135 	      if (!DECL_RTL_SET_P (exp)
8136 		  || !MEM_P (DECL_RTL (exp)))
8137 		return 0;
8138 	      else
8139 		exp_rtl = XEXP (DECL_RTL (exp), 0);
8140 	    }
8141 	  break;
8142 
8143 	case MEM_REF:
8144 	  if (MEM_P (x)
8145 	      && alias_sets_conflict_p (MEM_ALIAS_SET (x),
8146 					get_alias_set (exp)))
8147 	    return 0;
8148 	  break;
8149 
8150 	case CALL_EXPR:
8151 	  /* Assume that the call will clobber all hard registers and
8152 	     all of memory.  */
8153 	  if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
8154 	      || MEM_P (x))
8155 	    return 0;
8156 	  break;
8157 
8158 	case WITH_CLEANUP_EXPR:
8159 	case CLEANUP_POINT_EXPR:
8160 	  /* Lowered by gimplify.cc.  */
8161 	  gcc_unreachable ();
8162 
8163 	case SAVE_EXPR:
8164 	  return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
8165 
8166 	default:
8167 	  break;
8168 	}
8169 
8170       /* If we have an rtx, we do not need to scan our operands.  */
8171       if (exp_rtl)
8172 	break;
8173 
8174       nops = TREE_OPERAND_LENGTH (exp);
8175       for (i = 0; i < nops; i++)
8176 	if (TREE_OPERAND (exp, i) != 0
8177 	    && ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
8178 	  return 0;
8179 
8180       break;
8181 
8182     case tcc_type:
8183       /* Should never get a type here.  */
8184       gcc_unreachable ();
8185     }
8186 
8187   /* If we have an rtl, find any enclosed object.  Then see if we conflict
8188      with it.  */
8189   if (exp_rtl)
8190     {
8191       if (GET_CODE (exp_rtl) == SUBREG)
8192 	{
8193 	  exp_rtl = SUBREG_REG (exp_rtl);
8194 	  if (REG_P (exp_rtl)
8195 	      && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
8196 	    return 0;
8197 	}
8198 
8199       /* If the rtl is X, then it is not safe.  Otherwise, it is unless both
8200 	 are memory and they conflict.  */
8201       return ! (rtx_equal_p (x, exp_rtl)
8202 		|| (MEM_P (x) && MEM_P (exp_rtl)
8203 		    && true_dependence (exp_rtl, VOIDmode, x)));
8204     }
8205 
8206   /* If we reach here, it is safe.  */
8207   return 1;
8208 }
8209 
8210 
8211 /* Return the highest power of two that EXP is known to be a multiple of.
8212    This is used in updating alignment of MEMs in array references.  */
8213 
8214 unsigned HOST_WIDE_INT
8215 highest_pow2_factor (const_tree exp)
8216 {
8217   unsigned HOST_WIDE_INT ret;
8218   int trailing_zeros = tree_ctz (exp);
8219   if (trailing_zeros >= HOST_BITS_PER_WIDE_INT)
8220     return BIGGEST_ALIGNMENT;
8221   ret = HOST_WIDE_INT_1U << trailing_zeros;
8222   if (ret > BIGGEST_ALIGNMENT)
8223     return BIGGEST_ALIGNMENT;
8224   return ret;
8225 }
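
/* For example, for an INTEGER_CST of 24 tree_ctz reports 3 trailing zero
   bits, so the result is 1 << 3 == 8; the result is always capped at
   BIGGEST_ALIGNMENT.  */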
8226 
8227 /* Similar, except that the alignment requirements of TARGET are
8228    taken into account.  Assume it is at least as aligned as its
8229    type, unless it is a COMPONENT_REF in which case the layout of
8230    the structure gives the alignment.  */
8231 
8232 static unsigned HOST_WIDE_INT
8233 highest_pow2_factor_for_target (const_tree target, const_tree exp)
8234 {
8235   unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
8236   unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);
8237 
8238   return MAX (factor, talign);
8239 }
8240 
8241 /* Convert the tree comparison code TCODE to the rtl one where the
8242    signedness is UNSIGNEDP.  */
8243 
8244 static enum rtx_code
8245 convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
8246 {
8247   enum rtx_code code;
8248   switch (tcode)
8249     {
8250     case EQ_EXPR:
8251       code = EQ;
8252       break;
8253     case NE_EXPR:
8254       code = NE;
8255       break;
8256     case LT_EXPR:
8257       code = unsignedp ? LTU : LT;
8258       break;
8259     case LE_EXPR:
8260       code = unsignedp ? LEU : LE;
8261       break;
8262     case GT_EXPR:
8263       code = unsignedp ? GTU : GT;
8264       break;
8265     case GE_EXPR:
8266       code = unsignedp ? GEU : GE;
8267       break;
8268     case UNORDERED_EXPR:
8269       code = UNORDERED;
8270       break;
8271     case ORDERED_EXPR:
8272       code = ORDERED;
8273       break;
8274     case UNLT_EXPR:
8275       code = UNLT;
8276       break;
8277     case UNLE_EXPR:
8278       code = UNLE;
8279       break;
8280     case UNGT_EXPR:
8281       code = UNGT;
8282       break;
8283     case UNGE_EXPR:
8284       code = UNGE;
8285       break;
8286     case UNEQ_EXPR:
8287       code = UNEQ;
8288       break;
8289     case LTGT_EXPR:
8290       code = LTGT;
8291       break;
8292 
8293     default:
8294       gcc_unreachable ();
8295     }
8296   return code;
8297 }
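
/* For example, LT_EXPR maps to LTU when UNSIGNEDP is nonzero and to LT
   otherwise, while codes with no signed/unsigned distinction such as
   EQ_EXPR or UNORDERED_EXPR map to the same rtx code either way.  */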
8298 
8299 /* Subroutine of expand_expr.  Expand the two operands of a binary
8300    expression EXP0 and EXP1 placing the results in OP0 and OP1.
8301    The value may be stored in TARGET if TARGET is nonzero.  The
8302    MODIFIER argument is as documented by expand_expr.  */
8303 
8304 void
8305 expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
8306 		 enum expand_modifier modifier)
8307 {
8308   if (! safe_from_p (target, exp1, 1))
8309     target = 0;
8310   if (operand_equal_p (exp0, exp1, 0))
8311     {
8312       *op0 = expand_expr (exp0, target, VOIDmode, modifier);
8313       *op1 = copy_rtx (*op0);
8314     }
8315   else
8316     {
8317       *op0 = expand_expr (exp0, target, VOIDmode, modifier);
8318       *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier);
8319     }
8320 }
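
/* For illustration, a binary expander typically uses this as

     rtx op0, op1;
     expand_operands (treeop0, treeop1, subtarget, &op0, &op1,
		      EXPAND_NORMAL);

   with TREEOP0, TREEOP1 and SUBTARGET being the caller's operands and
   optional target; it relies on the safe_from_p check above to drop TARGET
   when evaluating EXP1 could clobber it, and on the operand_equal_p
   shortcut to expand identical operands only once.  */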
8321 
8322 
8323 /* Return a MEM that contains constant EXP.  DEFER is as for
8324    output_constant_def and MODIFIER is as for expand_expr.  */
8325 
8326 static rtx
8327 expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
8328 {
8329   rtx mem;
8330 
8331   mem = output_constant_def (exp, defer);
8332   if (modifier != EXPAND_INITIALIZER)
8333     mem = use_anchored_address (mem);
8334   return mem;
8335 }
8336 
8337 /* A subroutine of expand_expr_addr_expr.  Evaluate the address of EXP.
8338    The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */
8339 
8340 static rtx
8341 expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
8342 		         enum expand_modifier modifier, addr_space_t as)
8343 {
8344   rtx result, subtarget;
8345   tree inner, offset;
8346   poly_int64 bitsize, bitpos;
8347   int unsignedp, reversep, volatilep = 0;
8348   machine_mode mode1;
8349 
8350   /* If we are taking the address of a constant and are at the top level,
8351      we have to use output_constant_def since we can't call force_const_mem
8352      at top level.  */
8353   /* ??? This should be considered a front-end bug.  We should not be
8354      generating ADDR_EXPR of something that isn't an LVALUE.  The only
8355      exception here is STRING_CST.  */
8356   if (CONSTANT_CLASS_P (exp))
8357     {
8358       result = XEXP (expand_expr_constant (exp, 0, modifier), 0);
8359       if (modifier < EXPAND_SUM)
8360 	result = force_operand (result, target);
8361       return result;
8362     }
8363 
8364   /* Everything must be something allowed by is_gimple_addressable.  */
8365   switch (TREE_CODE (exp))
8366     {
8367     case INDIRECT_REF:
8368       /* This case will happen via recursion for &a->b.  */
8369       return expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
8370 
8371     case MEM_REF:
8372       {
8373 	tree tem = TREE_OPERAND (exp, 0);
8374 	if (!integer_zerop (TREE_OPERAND (exp, 1)))
8375 	  tem = fold_build_pointer_plus (tem, TREE_OPERAND (exp, 1));
8376 	return expand_expr (tem, target, tmode, modifier);
8377       }
8378 
8379     case TARGET_MEM_REF:
8380       return addr_for_mem_ref (exp, as, true);
8381 
8382     case CONST_DECL:
8383       /* Expand the initializer like constants above.  */
8384       result = XEXP (expand_expr_constant (DECL_INITIAL (exp),
8385 					   0, modifier), 0);
8386       if (modifier < EXPAND_SUM)
8387 	result = force_operand (result, target);
8388       return result;
8389 
8390     case REALPART_EXPR:
8391       /* The real part of the complex number is always first, therefore
8392 	 the address is the same as the address of the parent object.  */
8393       offset = 0;
8394       bitpos = 0;
8395       inner = TREE_OPERAND (exp, 0);
8396       break;
8397 
8398     case IMAGPART_EXPR:
8399       /* The imaginary part of the complex number is always second.
8400 	 The expression is therefore always offset by the size of the
8401 	 scalar type.  */
8402       offset = 0;
8403       bitpos = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp)));
8404       inner = TREE_OPERAND (exp, 0);
8405       break;
8406 
8407     case COMPOUND_LITERAL_EXPR:
8408       /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
8409 	 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
8410 	 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
8411 	 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
8412 	 the initializers aren't gimplified.  */
8413       if (COMPOUND_LITERAL_EXPR_DECL (exp)
8414 	  && is_global_var (COMPOUND_LITERAL_EXPR_DECL (exp)))
8415 	return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp),
8416 					target, tmode, modifier, as);
8417       /* FALLTHRU */
8418     default:
8419       /* If the object is a DECL, then expand it for its rtl.  Don't bypass
8420 	 expand_expr, as that can have various side effects; LABEL_DECLs for
8421 	 example, may not have their DECL_RTL set yet.  Expand the rtl of
8422 	 CONSTRUCTORs too, which should yield a memory reference for the
8423 	 constructor's contents.  Assume language specific tree nodes can
8424 	 be expanded in some interesting way.  */
8425       gcc_assert (TREE_CODE (exp) < LAST_AND_UNUSED_TREE_CODE);
8426       if (DECL_P (exp)
8427 	  || TREE_CODE (exp) == CONSTRUCTOR
8428 	  || TREE_CODE (exp) == COMPOUND_LITERAL_EXPR)
8429 	{
8430 	  result = expand_expr (exp, target, tmode,
8431 				modifier == EXPAND_INITIALIZER
8432 				? EXPAND_INITIALIZER : EXPAND_CONST_ADDRESS);
8433 
8434 	  /* If the DECL isn't in memory, then the DECL wasn't properly
8435 	     marked TREE_ADDRESSABLE, which will be either a front-end
8436 	     or a tree optimizer bug.  */
8437 
8438 	  gcc_assert (MEM_P (result));
8439 	  result = XEXP (result, 0);
8440 
8441 	  /* ??? Is this needed anymore?  */
8442 	  if (DECL_P (exp))
8443 	    TREE_USED (exp) = 1;
8444 
8445 	  if (modifier != EXPAND_INITIALIZER
8446 	      && modifier != EXPAND_CONST_ADDRESS
8447 	      && modifier != EXPAND_SUM)
8448 	    result = force_operand (result, target);
8449 	  return result;
8450 	}
8451 
8452       /* Pass FALSE as the last argument to get_inner_reference although
8453 	 we are expanding to RTL.  The rationale is that we know how to
8454 	 handle "aligning nodes" here: we can just bypass them because
8455 	 they won't change the final object whose address will be returned
8456 	 (they actually exist only for that purpose).  */
8457       inner = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
8458 				   &unsignedp, &reversep, &volatilep);
8459       break;
8460     }
8461 
8462   /* We must have made progress.  */
8463   gcc_assert (inner != exp);
8464 
8465   subtarget = offset || maybe_ne (bitpos, 0) ? NULL_RTX : target;
8466   /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
8467      inner alignment, force the inner to be sufficiently aligned.  */
8468   if (CONSTANT_CLASS_P (inner)
8469       && TYPE_ALIGN (TREE_TYPE (inner)) < TYPE_ALIGN (TREE_TYPE (exp)))
8470     {
8471       inner = copy_node (inner);
8472       TREE_TYPE (inner) = copy_node (TREE_TYPE (inner));
8473       SET_TYPE_ALIGN (TREE_TYPE (inner), TYPE_ALIGN (TREE_TYPE (exp)));
8474       TYPE_USER_ALIGN (TREE_TYPE (inner)) = 1;
8475     }
8476   result = expand_expr_addr_expr_1 (inner, subtarget, tmode, modifier, as);
8477 
8478   if (offset)
8479     {
8480       rtx tmp;
8481 
8482       if (modifier != EXPAND_NORMAL)
8483 	result = force_operand (result, NULL);
8484       tmp = expand_expr (offset, NULL_RTX, tmode,
8485 			 modifier == EXPAND_INITIALIZER
8486 			  ? EXPAND_INITIALIZER : EXPAND_NORMAL);
8487 
8488       /* expand_expr is allowed to return an object in a mode other
8489 	 than TMODE.  If it did, we need to convert.  */
8490       if (GET_MODE (tmp) != VOIDmode && tmode != GET_MODE (tmp))
8491 	tmp = convert_modes (tmode, GET_MODE (tmp),
8492 			     tmp, TYPE_UNSIGNED (TREE_TYPE (offset)));
8493       result = convert_memory_address_addr_space (tmode, result, as);
8494       tmp = convert_memory_address_addr_space (tmode, tmp, as);
8495 
8496       if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8497 	result = simplify_gen_binary (PLUS, tmode, result, tmp);
8498       else
8499 	{
8500 	  subtarget = maybe_ne (bitpos, 0) ? NULL_RTX : target;
8501 	  result = expand_simple_binop (tmode, PLUS, result, tmp, subtarget,
8502 					1, OPTAB_LIB_WIDEN);
8503 	}
8504     }
8505 
8506   if (maybe_ne (bitpos, 0))
8507     {
8508       /* Someone beforehand should have rejected taking the address
8509 	 of an object that isn't byte-aligned.  */
8510       poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
8511       result = convert_memory_address_addr_space (tmode, result, as);
8512       result = plus_constant (tmode, result, bytepos);
8513       if (modifier < EXPAND_SUM)
8514 	result = force_operand (result, target);
8515     }
8516 
8517   return result;
8518 }
8519 
8520 /* A subroutine of expand_expr.  Evaluate EXP, which is an ADDR_EXPR.
8521    The TARGET, TMODE and MODIFIER arguments are as for expand_expr.  */
8522 
8523 static rtx
8524 expand_expr_addr_expr (tree exp, rtx target, machine_mode tmode,
8525 		       enum expand_modifier modifier)
8526 {
8527   addr_space_t as = ADDR_SPACE_GENERIC;
8528   scalar_int_mode address_mode = Pmode;
8529   scalar_int_mode pointer_mode = ptr_mode;
8530   machine_mode rmode;
8531   rtx result;
8532 
8533   /* Target mode of VOIDmode says "whatever's natural".  */
8534   if (tmode == VOIDmode)
8535     tmode = TYPE_MODE (TREE_TYPE (exp));
8536 
8537   if (POINTER_TYPE_P (TREE_TYPE (exp)))
8538     {
8539       as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
8540       address_mode = targetm.addr_space.address_mode (as);
8541       pointer_mode = targetm.addr_space.pointer_mode (as);
8542     }
8543 
8544   /* We can get called with some Weird Things if the user does silliness
8545      like "(short) &a".  In that case, convert_memory_address won't do
8546      the right thing, so ignore the given target mode.  */
8547   scalar_int_mode new_tmode = (tmode == pointer_mode
8548 			       ? pointer_mode
8549 			       : address_mode);
8550 
8551   result = expand_expr_addr_expr_1 (TREE_OPERAND (exp, 0), target,
8552 				    new_tmode, modifier, as);
8553 
8554   /* Despite expand_expr's claims about ignoring TMODE when not
8555      strictly convenient, things break if we don't honor it.  Note
8556      that combined with the above, we only do this for pointer modes.  */
8557   rmode = GET_MODE (result);
8558   if (rmode == VOIDmode)
8559     rmode = new_tmode;
8560   if (rmode != new_tmode)
8561     result = convert_memory_address_addr_space (new_tmode, result, as);
8562 
8563   return result;
8564 }
8565 
8566 /* Generate code for computing CONSTRUCTOR EXP.
8567    An rtx for the computed value is returned.  If AVOID_TEMP_MEM
8568    is TRUE, instead of creating a temporary variable in memory
8569    NULL is returned and the caller needs to handle it differently.  */
8570 
8571 static rtx
8572 expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
8573 		    bool avoid_temp_mem)
8574 {
8575   tree type = TREE_TYPE (exp);
8576   machine_mode mode = TYPE_MODE (type);
8577 
8578   /* Try to avoid creating a temporary at all.  This is possible
8579      if all of the initializer is zero.
8580      FIXME: try to handle all [0..255] initializers we can handle
8581      with memset.  */
8582   if (TREE_STATIC (exp)
8583       && !TREE_ADDRESSABLE (exp)
8584       && target != 0 && mode == BLKmode
8585       && all_zeros_p (exp))
8586     {
8587       clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
8588       return target;
8589     }
8590 
8591   /* All elts simple constants => refer to a constant in memory.  But
8592      if this is a non-BLKmode mode, let it store a field at a time
8593      since that should make a CONST_INT, CONST_WIDE_INT or
8594      CONST_DOUBLE when we fold.  Likewise, if we have a target we can
8595      use, it is best to store directly into the target unless the type
8596      is large enough that memcpy will be used.  If we are making an
8597      initializer and all operands are constant, put it in memory as
8598      well.
8599 
8600      FIXME: Avoid trying to fill vector constructors piecemeal.
8601      Output them with output_constant_def below unless we're sure
8602      they're zeros.  This should go away when vector initializers
8603      are treated like VECTOR_CST instead of arrays.  */
8604   if ((TREE_STATIC (exp)
8605        && ((mode == BLKmode
8606 	    && ! (target != 0 && safe_from_p (target, exp, 1)))
8607 	   || TREE_ADDRESSABLE (exp)
8608 	   || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
8609 	       && (! can_move_by_pieces
8610 		   (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
8611 		    TYPE_ALIGN (type)))
8612 	       && ! mostly_zeros_p (exp))))
8613       || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS)
8614 	  && TREE_CONSTANT (exp)))
8615     {
8616       rtx constructor;
8617 
8618       if (avoid_temp_mem)
8619 	return NULL_RTX;
8620 
8621       constructor = expand_expr_constant (exp, 1, modifier);
8622 
8623       if (modifier != EXPAND_CONST_ADDRESS
8624 	  && modifier != EXPAND_INITIALIZER
8625 	  && modifier != EXPAND_SUM)
8626 	constructor = validize_mem (constructor);
8627 
8628       return constructor;
8629     }
8630 
8631   /* If the CTOR is available in static storage and not mostly
8632      zeros and we can move it by pieces prefer to do so since
8633      that's usually more efficient than performing a series of
8634      stores from immediates.  */
8635   if (avoid_temp_mem
8636       && TREE_STATIC (exp)
8637       && TREE_CONSTANT (exp)
8638       && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
8639       && can_move_by_pieces (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
8640 			     TYPE_ALIGN (type))
8641       && ! mostly_zeros_p (exp))
8642     return NULL_RTX;
8643 
8644   /* Handle calls that pass values in multiple non-contiguous
8645      locations.  The Irix 6 ABI has examples of this.  */
8646   if (target == 0 || ! safe_from_p (target, exp, 1)
8647       || GET_CODE (target) == PARALLEL || modifier == EXPAND_STACK_PARM
8648       /* Also make a temporary if the store is to volatile memory, to
8649 	 avoid individual accesses to aggregate members.  */
8650       || (GET_CODE (target) == MEM
8651 	  && MEM_VOLATILE_P (target)
8652 	  && !TREE_ADDRESSABLE (TREE_TYPE (exp))))
8653     {
8654       if (avoid_temp_mem)
8655 	return NULL_RTX;
8656 
8657       target = assign_temp (type, TREE_ADDRESSABLE (exp), 1);
8658     }
8659 
8660   store_constructor (exp, target, 0, int_expr_size (exp), false);
8661   return target;
8662 }
8663 
8664 
8665 /* expand_expr: generate code for computing expression EXP.
8666    An rtx for the computed value is returned.  The value is never null.
8667    In the case of a void EXP, const0_rtx is returned.
8668 
8669    The value may be stored in TARGET if TARGET is nonzero.
8670    TARGET is just a suggestion; callers must assume that
8671    the rtx returned may not be the same as TARGET.
8672 
8673    If TARGET is CONST0_RTX, it means that the value will be ignored.
8674 
8675    If TMODE is not VOIDmode, it suggests generating the
8676    result in mode TMODE.  But this is done only when convenient.
8677    Otherwise, TMODE is ignored and the value is generated in its natural mode.
8678    TMODE is just a suggestion; callers must assume that
8679    the rtx returned may not have mode TMODE.
8680 
8681    Note that TARGET may have neither TMODE nor MODE.  In that case, it
8682    probably will not be used.
8683 
8684    If MODIFIER is EXPAND_SUM then when EXP is an addition
8685    we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
8686    or a nest of (PLUS ...) and (MINUS ...) where the terms are
8687    products as above, or REG or MEM, or constant.
8688    Ordinarily in such cases we would output mul or add instructions
8689    and then return a pseudo reg containing the sum.
8690 
8691    EXPAND_INITIALIZER is much like EXPAND_SUM except that
8692    it also marks a label as absolutely required (it can't be dead).
8693    It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
8694    This is used for outputting expressions used in initializers.
8695 
8696    EXPAND_CONST_ADDRESS says that it is okay to return a MEM
8697    with a constant address even if that address is not normally legitimate.
8698    EXPAND_INITIALIZER and EXPAND_SUM also have this effect.
8699 
8700    EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
8701    a call parameter.  Such targets require special care as we haven't yet
8702    marked TARGET so that it's safe from being trashed by libcalls.  We
8703    don't want to use TARGET for anything but the final result;
8704    intermediate values must go elsewhere.  Additionally, calls to
8705    emit_block_move will be flagged with BLOCK_OP_CALL_PARM.
8706 
8707    If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
8708    address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
8709    DECL_RTL of the VAR_DECL.  *ALT_RTL is also set if EXP is a
8710    COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
8711    recursively.
8712    If the result can be stored at TARGET, and ALT_RTL is non-NULL,
8713    then *ALT_RTL is set to TARGET (before legitimization).
8714 
8715    If INNER_REFERENCE_P is true, we are expanding an inner reference.
8716    In this case, we don't adjust a returned MEM rtx that wouldn't be
8717    sufficiently aligned for its mode; instead, it's up to the caller
8718    to deal with it afterwards.  This is used to make sure that unaligned
8719    base objects for which out-of-bounds accesses are supported, for
8720    example record types with trailing arrays, aren't realigned behind
8721    the back of the caller.
8722    The normal operating mode is to pass FALSE for this parameter.  */
8723 
8724 rtx
8725 expand_expr_real (tree exp, rtx target, machine_mode tmode,
8726 		  enum expand_modifier modifier, rtx *alt_rtl,
8727 		  bool inner_reference_p)
8728 {
8729   rtx ret;
8730 
8731   /* Handle ERROR_MARK before anybody tries to access its type.  */
8732   if (TREE_CODE (exp) == ERROR_MARK
8733       || (TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
8734     {
8735       ret = CONST0_RTX (tmode);
8736       return ret ? ret : const0_rtx;
8737     }
8738 
8739   ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl,
8740 			    inner_reference_p);
8741   return ret;
8742 }
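
/* For illustration, most callers reach this through the expand_expr and
   expand_normal wrappers, e.g.

     rtx x = expand_expr (exp, NULL_RTX, VOIDmode, EXPAND_NORMAL);

   which requests the value in its natural mode with no suggested target,
   whereas static initializers are expanded with EXPAND_INITIALIZER so that
   constant addresses and explicit extension rtxes are acceptable, as
   described in the comment above.  */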
8743 
8744 /* Try to expand the conditional expression which is represented by
8745    TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves.  If it succeeds,
8746    return the rtl reg which represents the result.  Otherwise return
8747    NULL_RTX.  */
8748 
8749 static rtx
8750 expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
8751 			      tree treeop1 ATTRIBUTE_UNUSED,
8752 			      tree treeop2 ATTRIBUTE_UNUSED)
8753 {
8754   rtx insn;
8755   rtx op00, op01, op1, op2;
8756   enum rtx_code comparison_code;
8757   machine_mode comparison_mode;
8758   gimple *srcstmt;
8759   rtx temp;
8760   tree type = TREE_TYPE (treeop1);
8761   int unsignedp = TYPE_UNSIGNED (type);
8762   machine_mode mode = TYPE_MODE (type);
8763   machine_mode orig_mode = mode;
8764   static bool expanding_cond_expr_using_cmove = false;
8765 
8766   /* Conditional move expansion can end up TERing two operands which,
8767      when recursively hitting conditional expressions can result in
8768      exponential behavior if the cmove expansion ultimately fails.
8769      It's hardly profitable to TER a cmove into a cmove so avoid doing
8770      that by failing early if we end up recursing.  */
8771   if (expanding_cond_expr_using_cmove)
8772     return NULL_RTX;
8773 
8774   /* If we cannot do a conditional move on the mode, try doing it
8775      with the promoted mode. */
8776   if (!can_conditionally_move_p (mode))
8777     {
8778       mode = promote_mode (type, mode, &unsignedp);
8779       if (!can_conditionally_move_p (mode))
8780 	return NULL_RTX;
8781       temp = assign_temp (type, 0, 0); /* Use promoted mode for temp.  */
8782     }
8783   else
8784     temp = assign_temp (type, 0, 1);
8785 
8786   expanding_cond_expr_using_cmove = true;
8787   start_sequence ();
8788   expand_operands (treeop1, treeop2,
8789 		   mode == orig_mode ? temp : NULL_RTX, &op1, &op2,
8790 		   EXPAND_NORMAL);
8791 
8792   if (TREE_CODE (treeop0) == SSA_NAME
8793       && (srcstmt = get_def_for_expr_class (treeop0, tcc_comparison)))
8794     {
8795       type = TREE_TYPE (gimple_assign_rhs1 (srcstmt));
8796       enum tree_code cmpcode = gimple_assign_rhs_code (srcstmt);
8797       op00 = expand_normal (gimple_assign_rhs1 (srcstmt));
8798       op01 = expand_normal (gimple_assign_rhs2 (srcstmt));
8799       comparison_mode = TYPE_MODE (type);
8800       unsignedp = TYPE_UNSIGNED (type);
8801       comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8802     }
8803   else if (COMPARISON_CLASS_P (treeop0))
8804     {
8805       type = TREE_TYPE (TREE_OPERAND (treeop0, 0));
8806       enum tree_code cmpcode = TREE_CODE (treeop0);
8807       op00 = expand_normal (TREE_OPERAND (treeop0, 0));
8808       op01 = expand_normal (TREE_OPERAND (treeop0, 1));
8809       unsignedp = TYPE_UNSIGNED (type);
8810       comparison_mode = TYPE_MODE (type);
8811       comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8812     }
8813   else
8814     {
8815       op00 = expand_normal (treeop0);
8816       op01 = const0_rtx;
8817       comparison_code = NE;
8818       comparison_mode = GET_MODE (op00);
8819       if (comparison_mode == VOIDmode)
8820 	comparison_mode = TYPE_MODE (TREE_TYPE (treeop0));
8821     }
8822   expanding_cond_expr_using_cmove = false;
8823 
8824   if (GET_MODE (op1) != mode)
8825     op1 = gen_lowpart (mode, op1);
8826 
8827   if (GET_MODE (op2) != mode)
8828     op2 = gen_lowpart (mode, op2);
8829 
8830   /* Try to emit the conditional move.  */
8831   insn = emit_conditional_move (temp,
8832 				{ comparison_code, op00, op01,
8833 				  comparison_mode },
8834 				op1, op2, mode,
8835 				unsignedp);
8836 
8837   /* If we could do the conditional move, emit the sequence,
8838      and return.  */
8839   if (insn)
8840     {
8841       rtx_insn *seq = get_insns ();
8842       end_sequence ();
8843       emit_insn (seq);
8844       return convert_modes (orig_mode, mode, temp, 0);
8845     }
8846 
8847   /* Otherwise discard the sequence and fall back to code with
8848      branches.  */
8849   end_sequence ();
8850   return NULL_RTX;
8851 }
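
/* For illustration, this path handles gimple of the form

     _1 = a_2 < b_3;
     x_4 = _1 ? c_5 : d_6;

   where the comparison defining _1 is fetched via get_def_for_expr_class
   and becomes the comparison operand of emit_conditional_move; if the
   target cannot conditionally move in the value's mode even after
   promotion, NULL_RTX is returned and the caller falls back to branches.  */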
8852 
8853 /* A helper function for expand_expr_real_2 to be used with a
8854    misaligned mem_ref TEMP.  Assume an unsigned type if UNSIGNEDP
8855    is nonzero, with alignment ALIGN in bits.
8856    Store the value at TARGET if possible (if TARGET is nonzero).
8857    Regardless of TARGET, we return the rtx for where the value is placed.
8858    If the result can be stored at TARGET, and ALT_RTL is non-NULL,
8859    then *ALT_RTL is set to TARGET (before legitimization).  */
8860 
8861 static rtx
8862 expand_misaligned_mem_ref (rtx temp, machine_mode mode, int unsignedp,
8863 			   unsigned int align, rtx target, rtx *alt_rtl)
8864 {
8865   enum insn_code icode;
8866 
8867   if ((icode = optab_handler (movmisalign_optab, mode))
8868       != CODE_FOR_nothing)
8869     {
8870       class expand_operand ops[2];
8871 
8872       /* We've already validated the memory, and we're creating a
8873 	 new pseudo destination.  The predicates really can't fail,
8874 	 nor can the generator.  */
8875       create_output_operand (&ops[0], NULL_RTX, mode);
8876       create_fixed_operand (&ops[1], temp);
8877       expand_insn (icode, 2, ops);
8878       temp = ops[0].value;
8879     }
8880   else if (targetm.slow_unaligned_access (mode, align))
8881     temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
8882 			      0, unsignedp, target,
8883 			      mode, mode, false, alt_rtl);
8884   return temp;
8885 }
8886 
8887 /* Helper function of expand_expr_2, expand a division or modulo.
8888    op0 and op1 should be already expanded treeop0 and treeop1, using
8889    expand_operands.  */
8890 
8891 static rtx
8892 expand_expr_divmod (tree_code code, machine_mode mode, tree treeop0,
8893 		    tree treeop1, rtx op0, rtx op1, rtx target, int unsignedp)
8894 {
8895   bool mod_p = (code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR
8896 		|| code == CEIL_MOD_EXPR || code == ROUND_MOD_EXPR);
8897   if (SCALAR_INT_MODE_P (mode)
8898       && optimize >= 2
8899       && get_range_pos_neg (treeop0) == 1
8900       && get_range_pos_neg (treeop1) == 1)
8901     {
8902       /* If both arguments are known to be positive when interpreted
8903 	 as signed, we can expand it as both signed and unsigned
8904 	 division or modulo.  Choose the cheaper sequence in that case.  */
8905       bool speed_p = optimize_insn_for_speed_p ();
8906       do_pending_stack_adjust ();
8907       start_sequence ();
8908       rtx uns_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 1);
8909       rtx_insn *uns_insns = get_insns ();
8910       end_sequence ();
8911       start_sequence ();
8912       rtx sgn_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 0);
8913       rtx_insn *sgn_insns = get_insns ();
8914       end_sequence ();
8915       unsigned uns_cost = seq_cost (uns_insns, speed_p);
8916       unsigned sgn_cost = seq_cost (sgn_insns, speed_p);
8917 
8918       /* If the costs are the same, then use the other factor as a tie
8919 	 breaker.  */
8920       if (uns_cost == sgn_cost)
8921 	{
8922 	  uns_cost = seq_cost (uns_insns, !speed_p);
8923 	  sgn_cost = seq_cost (sgn_insns, !speed_p);
8924 	}
8925 
8926       if (uns_cost < sgn_cost || (uns_cost == sgn_cost && unsignedp))
8927 	{
8928 	  emit_insn (uns_insns);
8929 	  return uns_ret;
8930 	}
8931       emit_insn (sgn_insns);
8932       return sgn_ret;
8933     }
8934   return expand_divmod (mod_p, code, mode, op0, op1, target, unsignedp);
8935 }
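
/* As a worked example, for X / 3 where range information proves X is never
   negative, both an unsigned and a signed division sequence are generated;
   on many targets the unsigned multiply-by-reciprocal sequence is cheaper
   because it needs no sign fixup, so it wins the seq_cost comparison and is
   the sequence emitted.  */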
8936 
8937 rtx
8938 expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
8939 		    enum expand_modifier modifier)
8940 {
8941   rtx op0, op1, op2, temp;
8942   rtx_code_label *lab;
8943   tree type;
8944   int unsignedp;
8945   machine_mode mode;
8946   scalar_int_mode int_mode;
8947   enum tree_code code = ops->code;
8948   optab this_optab;
8949   rtx subtarget, original_target;
8950   int ignore;
8951   bool reduce_bit_field;
8952   location_t loc = ops->location;
8953   tree treeop0, treeop1, treeop2;
8954 #define REDUCE_BIT_FIELD(expr)	(reduce_bit_field			  \
8955 				 ? reduce_to_bit_field_precision ((expr), \
8956 								  target, \
8957 								  type)	  \
8958 				 : (expr))
8959 
8960   type = ops->type;
8961   mode = TYPE_MODE (type);
8962   unsignedp = TYPE_UNSIGNED (type);
8963 
8964   treeop0 = ops->op0;
8965   treeop1 = ops->op1;
8966   treeop2 = ops->op2;
8967 
8968   /* We should be called only on simple (binary or unary) expressions,
8969      exactly those that are valid in gimple expressions that aren't
8970      GIMPLE_SINGLE_RHS (or invalid).  */
8971   gcc_assert (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS
8972 	      || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS
8973 	      || get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS);
8974 
8975   ignore = (target == const0_rtx
8976 	    || ((CONVERT_EXPR_CODE_P (code)
8977 		 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
8978 		&& TREE_CODE (type) == VOID_TYPE));
8979 
8980   /* We should be called only if we need the result.  */
8981   gcc_assert (!ignore);
8982 
8983   /* An operation in what may be a bit-field type needs the
8984      result to be reduced to the precision of the bit-field type,
8985      which is narrower than that of the type's mode.  */
8986   reduce_bit_field = (INTEGRAL_TYPE_P (type)
8987 		      && !type_has_mode_precision_p (type));
8988 
8989   if (reduce_bit_field
8990       && (modifier == EXPAND_STACK_PARM
8991 	  || (target && GET_MODE (target) != mode)))
8992     target = 0;
8993 
8994   /* Use subtarget as the target for operand 0 of a binary operation.  */
8995   subtarget = get_subtarget (target);
8996   original_target = target;
8997 
8998   switch (code)
8999     {
9000     case NON_LVALUE_EXPR:
9001     case PAREN_EXPR:
9002     CASE_CONVERT:
9003       if (treeop0 == error_mark_node)
9004 	return const0_rtx;
9005 
9006       if (TREE_CODE (type) == UNION_TYPE)
9007 	{
9008 	  tree valtype = TREE_TYPE (treeop0);
9009 
9010 	  /* If both input and output are BLKmode, this conversion isn't doing
9011 	     anything except possibly changing memory attribute.  */
9012 	  if (mode == BLKmode && TYPE_MODE (valtype) == BLKmode)
9013 	    {
9014 	      rtx result = expand_expr (treeop0, target, tmode,
9015 					modifier);
9016 
9017 	      result = copy_rtx (result);
9018 	      set_mem_attributes (result, type, 0);
9019 	      return result;
9020 	    }
9021 
9022 	  if (target == 0)
9023 	    {
9024 	      if (TYPE_MODE (type) != BLKmode)
9025 		target = gen_reg_rtx (TYPE_MODE (type));
9026 	      else
9027 		target = assign_temp (type, 1, 1);
9028 	    }
9029 
9030 	  if (MEM_P (target))
9031 	    /* Store data into beginning of memory target.  */
9032 	    store_expr (treeop0,
9033 			adjust_address (target, TYPE_MODE (valtype), 0),
9034 			modifier == EXPAND_STACK_PARM,
9035 			false, TYPE_REVERSE_STORAGE_ORDER (type));
9036 
9037 	  else
9038 	    {
9039 	      gcc_assert (REG_P (target)
9040 			  && !TYPE_REVERSE_STORAGE_ORDER (type));
9041 
9042 	      /* Store this field into a union of the proper type.  */
9043 	      poly_uint64 op0_size
9044 		= tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0)));
9045 	      poly_uint64 union_size = GET_MODE_BITSIZE (mode);
9046 	      store_field (target,
9047 			   /* The conversion must be constructed so that
9048 			      we know at compile time how many bits
9049 			      to preserve.  */
9050 			   ordered_min (op0_size, union_size),
9051 			   0, 0, 0, TYPE_MODE (valtype), treeop0, 0,
9052 			   false, false);
9053 	    }
9054 
9055 	  /* Return the entire union.  */
9056 	  return target;
9057 	}
9058 
9059       if (mode == TYPE_MODE (TREE_TYPE (treeop0)))
9060 	{
9061 	  op0 = expand_expr (treeop0, target, VOIDmode,
9062 			     modifier);
9063 
9064 	  /* If the signedness of the conversion differs and OP0 is
9065 	     a promoted SUBREG, clear that indication since we now
9066 	     have to do the proper extension.  */
9067 	  if (TYPE_UNSIGNED (TREE_TYPE (treeop0)) != unsignedp
9068 	      && GET_CODE (op0) == SUBREG)
9069 	    SUBREG_PROMOTED_VAR_P (op0) = 0;
9070 
9071 	  return REDUCE_BIT_FIELD (op0);
9072 	}
9073 
9074       op0 = expand_expr (treeop0, NULL_RTX, mode,
9075 			 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
9076       if (GET_MODE (op0) == mode)
9077 	;
9078 
9079       /* If OP0 is a constant, just convert it into the proper mode.  */
9080       else if (CONSTANT_P (op0))
9081 	{
9082 	  tree inner_type = TREE_TYPE (treeop0);
9083 	  machine_mode inner_mode = GET_MODE (op0);
9084 
9085 	  if (inner_mode == VOIDmode)
9086 	    inner_mode = TYPE_MODE (inner_type);
9087 
9088 	  if (modifier == EXPAND_INITIALIZER)
9089 	    op0 = lowpart_subreg (mode, op0, inner_mode);
9090 	  else
9091 	    op0 = convert_modes (mode, inner_mode, op0,
9092 				 TYPE_UNSIGNED (inner_type));
9093 	}
9094 
9095       else if (modifier == EXPAND_INITIALIZER)
9096 	op0 = gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0))
9097 			     ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
9098 
9099       else if (target == 0)
9100 	op0 = convert_to_mode (mode, op0,
9101 			       TYPE_UNSIGNED (TREE_TYPE
9102 					      (treeop0)));
9103       else
9104 	{
9105 	  convert_move (target, op0,
9106 			TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9107 	  op0 = target;
9108 	}
9109 
9110       return REDUCE_BIT_FIELD (op0);
9111 
9112     case ADDR_SPACE_CONVERT_EXPR:
9113       {
9114 	tree treeop0_type = TREE_TYPE (treeop0);
9115 
9116 	gcc_assert (POINTER_TYPE_P (type));
9117 	gcc_assert (POINTER_TYPE_P (treeop0_type));
9118 
9119 	addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
9120 	addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type));
9121 
9122         /* Conversions between pointers to the same address space should
9123 	   have been implemented via CONVERT_EXPR / NOP_EXPR.  */
9124 	gcc_assert (as_to != as_from);
9125 
9126 	op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
9127 
9128         /* Ask target code to handle conversion between pointers
9129 	   to overlapping address spaces.  */
9130 	if (targetm.addr_space.subset_p (as_to, as_from)
9131 	    || targetm.addr_space.subset_p (as_from, as_to))
9132 	  {
9133 	    op0 = targetm.addr_space.convert (op0, treeop0_type, type);
9134 	  }
9135         else
9136           {
9137 	    /* For disjoint address spaces, converting anything but a null
9138 	       pointer invokes undefined behavior.  We truncate or extend the
9139 	       value as if we'd converted via integers, which handles 0 as
9140 	       required, and all others as the programmer likely expects.  */
9141 #ifndef POINTERS_EXTEND_UNSIGNED
9142 	    const int POINTERS_EXTEND_UNSIGNED = 1;
9143 #endif
9144 	    op0 = convert_modes (mode, TYPE_MODE (treeop0_type),
9145 				 op0, POINTERS_EXTEND_UNSIGNED);
9146 	  }
9147 	gcc_assert (op0);
9148 	return op0;
9149       }
9150 
9151     case POINTER_PLUS_EXPR:
9152       /* Even though the sizetype mode and the pointer's mode can be different,
9153          expand is able to handle this correctly and get the correct result out
9154          of the PLUS_EXPR code.  */
9155       /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
9156          if sizetype precision is smaller than pointer precision.  */
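      /* For example, if sizetype is 32 bits wide but the pointer type is
	 64 bits wide (illustrative widths), the offset is first converted to
	 ssizetype so that it is sign-extended rather than zero-extended, and
	 p + (sizetype) -4 still means p - 4.  */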
9157       if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
9158 	treeop1 = fold_convert_loc (loc, type,
9159 				    fold_convert_loc (loc, ssizetype,
9160 						      treeop1));
9161       /* If sizetype precision is larger than pointer precision, truncate the
9162 	 offset to have matching modes.  */
9163       else if (TYPE_PRECISION (sizetype) > TYPE_PRECISION (type))
9164 	treeop1 = fold_convert_loc (loc, type, treeop1);
9165       /* FALLTHRU */
9166 
9167     case PLUS_EXPR:
9168       /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
9169 	 something else, make sure we add the register to the constant and
9170 	 then to the other thing.  This case can occur during strength
9171 	 reduction and doing it this way will produce better code if the
9172 	 frame pointer or argument pointer is eliminated.
9173 
9174 	 fold-const.cc will ensure that the constant is always in the inner
9175 	 PLUS_EXPR, so the only case we need to do anything about is if
9176 	 sp, ap, or fp is our second argument, in which case we must swap
9177 	 the innermost first argument and our second argument.  */
9178 
9179       if (TREE_CODE (treeop0) == PLUS_EXPR
9180 	  && TREE_CODE (TREE_OPERAND (treeop0, 1)) == INTEGER_CST
9181 	  && VAR_P (treeop1)
9182 	  && (DECL_RTL (treeop1) == frame_pointer_rtx
9183 	      || DECL_RTL (treeop1) == stack_pointer_rtx
9184 	      || DECL_RTL (treeop1) == arg_pointer_rtx))
9185 	{
9186 	  gcc_unreachable ();
9187 	}
9188 
9189       /* If the result is to be ptr_mode and we are adding an integer to
9190 	 something, we might be forming a constant.  So try to use
9191 	 plus_constant.  If it produces a sum and we can't accept it,
9192 	 use force_operand.  This allows P = &ARR[const] to generate
9193 	 efficient code on machines where a SYMBOL_REF is not a valid
9194 	 address.
9195 
9196 	 If this is an EXPAND_SUM call, always return the sum.  */
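      /* For example, for "static int arr[16];" the address &arr[3] can then
	 be returned under EXPAND_SUM roughly as
	   (const:P (plus:P (symbol_ref:P "arr") (const_int 12)))
	 (illustrative RTL, assuming 4-byte int), instead of being forced
	 into a register first.  */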
9197       if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
9198 	  || (mode == ptr_mode && (unsignedp || ! flag_trapv)))
9199 	{
9200 	  if (modifier == EXPAND_STACK_PARM)
9201 	    target = 0;
9202 	  if (TREE_CODE (treeop0) == INTEGER_CST
9203 	      && HWI_COMPUTABLE_MODE_P (mode)
9204 	      && TREE_CONSTANT (treeop1))
9205 	    {
9206 	      rtx constant_part;
9207 	      HOST_WIDE_INT wc;
9208 	      machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1));
9209 
9210 	      op1 = expand_expr (treeop1, subtarget, VOIDmode,
9211 				 EXPAND_SUM);
9212 	      /* Use wi::shwi to ensure that the constant is
9213 		 truncated according to the mode of OP1, then sign extended
9214 		 to a HOST_WIDE_INT.  Using the constant directly can result
9215 		 in non-canonical RTL in a 64x32 cross compile.  */
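	      /* For instance, on a 64-bit host targeting a 32-bit machine,
		 the 32-bit constant 0xffffffff must be represented by the
		 sign-extended CONST_INT -1 rather than 4294967295;
		 wi::shwi yields that canonical form.  */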
9216 	      wc = TREE_INT_CST_LOW (treeop0);
9217 	      constant_part =
9218 		immed_wide_int_const (wi::shwi (wc, wmode), wmode);
9219 	      op1 = plus_constant (mode, op1, INTVAL (constant_part));
9220 	      if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
9221 		op1 = force_operand (op1, target);
9222 	      return REDUCE_BIT_FIELD (op1);
9223 	    }
9224 
9225 	  else if (TREE_CODE (treeop1) == INTEGER_CST
9226 		   && HWI_COMPUTABLE_MODE_P (mode)
9227 		   && TREE_CONSTANT (treeop0))
9228 	    {
9229 	      rtx constant_part;
9230 	      HOST_WIDE_INT wc;
9231 	      machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0));
9232 
9233 	      op0 = expand_expr (treeop0, subtarget, VOIDmode,
9234 				 (modifier == EXPAND_INITIALIZER
9235 				 ? EXPAND_INITIALIZER : EXPAND_SUM));
9236 	      if (! CONSTANT_P (op0))
9237 		{
9238 		  op1 = expand_expr (treeop1, NULL_RTX,
9239 				     VOIDmode, modifier);
9240 		  /* Return a PLUS if modifier says it's OK.  */
9241 		  if (modifier == EXPAND_SUM
9242 		      || modifier == EXPAND_INITIALIZER)
9243 		    return simplify_gen_binary (PLUS, mode, op0, op1);
9244 		  goto binop2;
9245 		}
9246 	      /* Use wi::shwi to ensure that the constant is
9247 		 truncated according to the mode of OP1, then sign extended
9248 		 to a HOST_WIDE_INT.  Using the constant directly can result
9249 		 in non-canonical RTL in a 64x32 cross compile.  */
9250 	      wc = TREE_INT_CST_LOW (treeop1);
9251 	      constant_part
9252 		= immed_wide_int_const (wi::shwi (wc, wmode), wmode);
9253 	      op0 = plus_constant (mode, op0, INTVAL (constant_part));
9254 	      if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
9255 		op0 = force_operand (op0, target);
9256 	      return REDUCE_BIT_FIELD (op0);
9257 	    }
9258 	}
9259 
9260       /* Use TER to expand pointer addition of a negated value
9261 	 as pointer subtraction.  */
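      /* For example, for illustrative GIMPLE such as
	   _1 = -n_2;
	   ptr_3 = ptr_4 + _1;
	 we expand ptr_4 - n_2 instead of negating and then adding.  */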
9262       if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
9263 	   || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
9264 	       && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
9265 	  && TREE_CODE (treeop1) == SSA_NAME
9266 	  && TYPE_MODE (TREE_TYPE (treeop0))
9267 	     == TYPE_MODE (TREE_TYPE (treeop1)))
9268 	{
9269 	  gimple *def = get_def_for_expr (treeop1, NEGATE_EXPR);
9270 	  if (def)
9271 	    {
9272 	      treeop1 = gimple_assign_rhs1 (def);
9273 	      code = MINUS_EXPR;
9274 	      goto do_minus;
9275 	    }
9276 	}
9277 
9278       /* No sense saving up arithmetic to be done
9279 	 if it's all in the wrong mode to form part of an address.
9280 	 And force_operand won't know whether to sign-extend or
9281 	 zero-extend.  */
9282       if (modifier != EXPAND_INITIALIZER
9283 	  && (modifier != EXPAND_SUM || mode != ptr_mode))
9284 	{
9285 	  expand_operands (treeop0, treeop1,
9286 			   subtarget, &op0, &op1, modifier);
9287 	  if (op0 == const0_rtx)
9288 	    return op1;
9289 	  if (op1 == const0_rtx)
9290 	    return op0;
9291 	  goto binop2;
9292 	}
9293 
9294       expand_operands (treeop0, treeop1,
9295 		       subtarget, &op0, &op1, modifier);
9296       return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
9297 
9298     case MINUS_EXPR:
9299     case POINTER_DIFF_EXPR:
9300     do_minus:
9301       /* For initializers, we are allowed to return a MINUS of two
9302 	 symbolic constants.  Here we handle all cases when both operands
9303 	 are constant.  */
9304       /* Handle difference of two symbolic constants,
9305 	 for the sake of an initializer.  */
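      /* For example, under EXPAND_INITIALIZER the difference of two symbol
	 addresses can be returned roughly as
	   (minus (symbol_ref "b") (symbol_ref "a"))
	 and left for the assembler to resolve, where the target supports
	 such a relocation (illustrative RTL).  */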
9306       if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
9307 	  && really_constant_p (treeop0)
9308 	  && really_constant_p (treeop1))
9309 	{
9310 	  expand_operands (treeop0, treeop1,
9311 			   NULL_RTX, &op0, &op1, modifier);
9312 	  return simplify_gen_binary (MINUS, mode, op0, op1);
9313 	}
9314 
9315       /* No sense saving up arithmetic to be done
9316 	 if it's all in the wrong mode to form part of an address.
9317 	 And force_operand won't know whether to sign-extend or
9318 	 zero-extend.  */
9319       if (modifier != EXPAND_INITIALIZER
9320 	  && (modifier != EXPAND_SUM || mode != ptr_mode))
9321 	goto binop;
9322 
9323       expand_operands (treeop0, treeop1,
9324 		       subtarget, &op0, &op1, modifier);
9325 
9326       /* Convert A - const to A + (-const).  */
9327       if (CONST_INT_P (op1))
9328 	{
9329 	  op1 = negate_rtx (mode, op1);
9330 	  return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
9331 	}
9332 
9333       goto binop2;
9334 
9335     case WIDEN_MULT_PLUS_EXPR:
9336     case WIDEN_MULT_MINUS_EXPR:
9337       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9338       op2 = expand_normal (treeop2);
9339       target = expand_widen_pattern_expr (ops, op0, op1, op2,
9340 					  target, unsignedp);
9341       return target;
9342 
9343     case WIDEN_PLUS_EXPR:
9344     case WIDEN_MINUS_EXPR:
9345     case WIDEN_MULT_EXPR:
9346       /* If first operand is constant, swap them.
9347 	 Thus the following special case checks need only
9348 	 check the second operand.  */
9349       if (TREE_CODE (treeop0) == INTEGER_CST)
9350 	std::swap (treeop0, treeop1);
9351 
9352       /* First, check if we have a multiplication of one signed and one
9353 	 unsigned operand.  */
9354       if (TREE_CODE (treeop1) != INTEGER_CST
9355 	  && (TYPE_UNSIGNED (TREE_TYPE (treeop0))
9356 	      != TYPE_UNSIGNED (TREE_TYPE (treeop1))))
9357 	{
9358 	  machine_mode innermode = TYPE_MODE (TREE_TYPE (treeop0));
9359 	  this_optab = usmul_widen_optab;
9360 	  if (find_widening_optab_handler (this_optab, mode, innermode)
9361 		!= CODE_FOR_nothing)
9362 	    {
9363 	      if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9364 		expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
9365 				 EXPAND_NORMAL);
9366 	      else
9367 		expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
9368 				 EXPAND_NORMAL);
9369 	      /* op0 and op1 might still be constant, despite the above
9370 		 != INTEGER_CST check.  Handle it.  */
9371 	      if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
9372 		{
9373 		  op0 = convert_modes (mode, innermode, op0, true);
9374 		  op1 = convert_modes (mode, innermode, op1, false);
9375 		  return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
9376 							target, unsignedp));
9377 		}
9378 	      goto binop3;
9379 	    }
9380 	}
9381       /* Check for a multiplication with matching signedness.  */
9382       else if ((TREE_CODE (treeop1) == INTEGER_CST
9383 		&& int_fits_type_p (treeop1, TREE_TYPE (treeop0)))
9384 	       || (TYPE_UNSIGNED (TREE_TYPE (treeop1))
9385 		   == TYPE_UNSIGNED (TREE_TYPE (treeop0))))
9386 	{
9387 	  tree op0type = TREE_TYPE (treeop0);
9388 	  machine_mode innermode = TYPE_MODE (op0type);
9389 	  bool zextend_p = TYPE_UNSIGNED (op0type);
9390 	  optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
9391 	  this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
9392 
9393 	  if (TREE_CODE (treeop0) != INTEGER_CST)
9394 	    {
9395 	      if (find_widening_optab_handler (this_optab, mode, innermode)
9396 		  != CODE_FOR_nothing)
9397 		{
9398 		  expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
9399 				   EXPAND_NORMAL);
9400 		  /* op0 and op1 might still be constant, despite the above
9401 		     != INTEGER_CST check.  Handle it.  */
9402 		  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
9403 		    {
9404 		     widen_mult_const:
9405 		      op0 = convert_modes (mode, innermode, op0, zextend_p);
9406 		      op1
9407 			= convert_modes (mode, innermode, op1,
9408 					 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
9409 		      return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
9410 							    target,
9411 							    unsignedp));
9412 		    }
9413 		  temp = expand_widening_mult (mode, op0, op1, target,
9414 					       unsignedp, this_optab);
9415 		  return REDUCE_BIT_FIELD (temp);
9416 		}
9417 	      if (find_widening_optab_handler (other_optab, mode, innermode)
9418 		  != CODE_FOR_nothing
9419 		  && innermode == word_mode)
9420 		{
9421 		  rtx htem, hipart;
9422 		  op0 = expand_normal (treeop0);
9423 		  op1 = expand_normal (treeop1);
9424 		  /* op0 and op1 might be constants, despite the above
9425 		     != INTEGER_CST check.  Handle it.  */
9426 		  if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
9427 		    goto widen_mult_const;
9428 		  temp = expand_binop (mode, other_optab, op0, op1, target,
9429 				       unsignedp, OPTAB_LIB_WIDEN);
9430 		  hipart = gen_highpart (word_mode, temp);
9431 		  htem = expand_mult_highpart_adjust (word_mode, hipart,
9432 						      op0, op1, hipart,
9433 						      zextend_p);
9434 		  if (htem != hipart)
9435 		    emit_move_insn (hipart, htem);
9436 		  return REDUCE_BIT_FIELD (temp);
9437 		}
9438 	    }
9439 	}
9440       treeop0 = fold_build1 (CONVERT_EXPR, type, treeop0);
9441       treeop1 = fold_build1 (CONVERT_EXPR, type, treeop1);
9442       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9443       return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
9444 
9445     case MULT_EXPR:
9446       /* If this is a fixed-point operation, then we cannot use the code
9447 	 below because "expand_mult" doesn't support sat/no-sat fixed-point
9448          multiplications.   */
9449       if (ALL_FIXED_POINT_MODE_P (mode))
9450 	goto binop;
9451 
9452       /* If first operand is constant, swap them.
9453 	 Thus the following special case checks need only
9454 	 check the second operand.  */
9455       if (TREE_CODE (treeop0) == INTEGER_CST)
9456 	std::swap (treeop0, treeop1);
9457 
9458       /* Attempt to return something suitable for generating an
9459 	 indexed address, for machines that support that.  */
9460 
9461       if (modifier == EXPAND_SUM && mode == ptr_mode
9462 	  && tree_fits_shwi_p (treeop1))
9463 	{
9464 	  tree exp1 = treeop1;
9465 
9466 	  op0 = expand_expr (treeop0, subtarget, VOIDmode,
9467 			     EXPAND_SUM);
9468 
9469 	  if (!REG_P (op0))
9470 	    op0 = force_operand (op0, NULL_RTX);
9471 	  if (!REG_P (op0))
9472 	    op0 = copy_to_mode_reg (mode, op0);
9473 
9474 	  op1 = gen_int_mode (tree_to_shwi (exp1),
9475 			      TYPE_MODE (TREE_TYPE (exp1)));
9476 	  return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0, op1));
9477 	}
9478 
9479       if (modifier == EXPAND_STACK_PARM)
9480 	target = 0;
9481 
9482       if (SCALAR_INT_MODE_P (mode) && optimize >= 2)
9483 	{
9484 	  gimple *def_stmt0 = get_def_for_expr (treeop0, TRUNC_DIV_EXPR);
9485 	  gimple *def_stmt1 = get_def_for_expr (treeop1, TRUNC_DIV_EXPR);
9486 	  if (def_stmt0
9487 	      && !operand_equal_p (treeop1, gimple_assign_rhs2 (def_stmt0), 0))
9488 	    def_stmt0 = NULL;
9489 	  if (def_stmt1
9490 	      && !operand_equal_p (treeop0, gimple_assign_rhs2 (def_stmt1), 0))
9491 	    def_stmt1 = NULL;
9492 
9493 	  if (def_stmt0 || def_stmt1)
9494 	    {
9495 	      /* X / Y * Y can be expanded as X - X % Y too.
9496 		 Choose the cheaper sequence of those two.  */
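	      /* For instance, with X = 23 and Y = 5 both forms yield 20:
		 23 / 5 * 5 = 4 * 5 = 20 and 23 - 23 % 5 = 23 - 3 = 20.  */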
9497 	      if (def_stmt0)
9498 		treeop0 = gimple_assign_rhs1 (def_stmt0);
9499 	      else
9500 		{
9501 		  treeop1 = treeop0;
9502 		  treeop0 = gimple_assign_rhs1 (def_stmt1);
9503 		}
9504 	      expand_operands (treeop0, treeop1, subtarget, &op0, &op1,
9505 			       EXPAND_NORMAL);
9506 	      bool speed_p = optimize_insn_for_speed_p ();
9507 	      do_pending_stack_adjust ();
9508 	      start_sequence ();
9509 	      rtx divmul_ret
9510 		= expand_expr_divmod (TRUNC_DIV_EXPR, mode, treeop0, treeop1,
9511 				      op0, op1, NULL_RTX, unsignedp);
9512 	      divmul_ret = expand_mult (mode, divmul_ret, op1, target,
9513 					unsignedp);
9514 	      rtx_insn *divmul_insns = get_insns ();
9515 	      end_sequence ();
9516 	      start_sequence ();
9517 	      rtx modsub_ret
9518 		= expand_expr_divmod (TRUNC_MOD_EXPR, mode, treeop0, treeop1,
9519 				      op0, op1, NULL_RTX, unsignedp);
9520 	      this_optab = optab_for_tree_code (MINUS_EXPR, type,
9521 						optab_default);
9522 	      modsub_ret = expand_binop (mode, this_optab, op0, modsub_ret,
9523 					 target, unsignedp, OPTAB_LIB_WIDEN);
9524 	      rtx_insn *modsub_insns = get_insns ();
9525 	      end_sequence ();
9526 	      unsigned divmul_cost = seq_cost (divmul_insns, speed_p);
9527 	      unsigned modsub_cost = seq_cost (modsub_insns, speed_p);
9528 	      /* If the costs are the same then use the other factor as a tie
9529 		 breaker.  */
9530 	      if (divmul_cost == modsub_cost)
9531 		{
9532 		  divmul_cost = seq_cost (divmul_insns, !speed_p);
9533 		  modsub_cost = seq_cost (modsub_insns, !speed_p);
9534 		}
9535 
9536 	      if (divmul_cost <= modsub_cost)
9537 		{
9538 		  emit_insn (divmul_insns);
9539 		  return REDUCE_BIT_FIELD (divmul_ret);
9540 		}
9541 	      emit_insn (modsub_insns);
9542 	      return REDUCE_BIT_FIELD (modsub_ret);
9543 	    }
9544 	}
9545 
9546       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9547       return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
9548 
9549     case TRUNC_MOD_EXPR:
9550     case FLOOR_MOD_EXPR:
9551     case CEIL_MOD_EXPR:
9552     case ROUND_MOD_EXPR:
9553 
9554     case TRUNC_DIV_EXPR:
9555     case FLOOR_DIV_EXPR:
9556     case CEIL_DIV_EXPR:
9557     case ROUND_DIV_EXPR:
9558     case EXACT_DIV_EXPR:
9559       /* If this is a fixed-point operation, then we cannot use the code
9560 	 below because "expand_divmod" doesn't support sat/no-sat fixed-point
9561 	 divisions.   */
9562       if (ALL_FIXED_POINT_MODE_P (mode))
9563 	goto binop;
9564 
9565       if (modifier == EXPAND_STACK_PARM)
9566 	target = 0;
9567       /* Possible optimization: compute the dividend with EXPAND_SUM,
9568 	 then if the divisor is constant we can optimize the case
9569 	 where some terms of the dividend have coeffs divisible by it.  */
9570       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9571       return expand_expr_divmod (code, mode, treeop0, treeop1, op0, op1,
9572 				 target, unsignedp);
9573 
9574     case RDIV_EXPR:
9575       goto binop;
9576 
9577     case MULT_HIGHPART_EXPR:
9578       expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9579       temp = expand_mult_highpart (mode, op0, op1, target, unsignedp);
9580       gcc_assert (temp);
9581       return temp;
9582 
9583     case FIXED_CONVERT_EXPR:
9584       op0 = expand_normal (treeop0);
9585       if (target == 0 || modifier == EXPAND_STACK_PARM)
9586 	target = gen_reg_rtx (mode);
9587 
9588       if ((TREE_CODE (TREE_TYPE (treeop0)) == INTEGER_TYPE
9589 	   && TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9590           || (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)))
9591 	expand_fixed_convert (target, op0, 1, TYPE_SATURATING (type));
9592       else
9593 	expand_fixed_convert (target, op0, 0, TYPE_SATURATING (type));
9594       return target;
9595 
9596     case FIX_TRUNC_EXPR:
9597       op0 = expand_normal (treeop0);
9598       if (target == 0 || modifier == EXPAND_STACK_PARM)
9599 	target = gen_reg_rtx (mode);
9600       expand_fix (target, op0, unsignedp);
9601       return target;
9602 
9603     case FLOAT_EXPR:
9604       op0 = expand_normal (treeop0);
9605       if (target == 0 || modifier == EXPAND_STACK_PARM)
9606 	target = gen_reg_rtx (mode);
9607       /* expand_float can't figure out what to do if FROM has VOIDmode.
9608 	 So give it the correct mode.  With -O, cse will optimize this.  */
9609       if (GET_MODE (op0) == VOIDmode)
9610 	op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0)),
9611 				op0);
9612       expand_float (target, op0,
9613 		    TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9614       return target;
9615 
9616     case NEGATE_EXPR:
9617       op0 = expand_expr (treeop0, subtarget,
9618 			 VOIDmode, EXPAND_NORMAL);
9619       if (modifier == EXPAND_STACK_PARM)
9620 	target = 0;
9621       temp = expand_unop (mode,
9622       			  optab_for_tree_code (NEGATE_EXPR, type,
9623 					       optab_default),
9624 			  op0, target, 0);
9625       gcc_assert (temp);
9626       return REDUCE_BIT_FIELD (temp);
9627 
9628     case ABS_EXPR:
9629     case ABSU_EXPR:
9630       op0 = expand_expr (treeop0, subtarget,
9631 			 VOIDmode, EXPAND_NORMAL);
9632       if (modifier == EXPAND_STACK_PARM)
9633 	target = 0;
9634 
9635       /* ABS_EXPR is not valid for complex arguments.  */
9636       gcc_assert (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
9637 		  && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT);
9638 
9639       /* Unsigned abs is simply the operand.  Testing here means we don't
9640 	 risk generating incorrect code below.  */
9641       if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9642 	return op0;
9643 
9644       return expand_abs (mode, op0, target, unsignedp,
9645 			 safe_from_p (target, treeop0, 1));
9646 
9647     case MAX_EXPR:
9648     case MIN_EXPR:
9649       target = original_target;
9650       if (target == 0
9651 	  || modifier == EXPAND_STACK_PARM
9652 	  || (MEM_P (target) && MEM_VOLATILE_P (target))
9653 	  || GET_MODE (target) != mode
9654 	  || (REG_P (target)
9655 	      && REGNO (target) < FIRST_PSEUDO_REGISTER))
9656 	target = gen_reg_rtx (mode);
9657       expand_operands (treeop0, treeop1,
9658 		       target, &op0, &op1, EXPAND_NORMAL);
9659 
9660       /* First try to do it with a special MIN or MAX instruction.
9661 	 If that does not win, use a conditional jump to select the proper
9662 	 value.  */
9663       this_optab = optab_for_tree_code (code, type, optab_default);
9664       temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
9665 			   OPTAB_WIDEN);
9666       if (temp != 0)
9667 	return temp;
9668 
9669       if (VECTOR_TYPE_P (type))
9670 	gcc_unreachable ();
9671 
9672       /* At this point, a MEM target is no longer useful; we will get better
9673 	 code without it.  */
9674 
9675       if (! REG_P (target))
9676 	target = gen_reg_rtx (mode);
9677 
9678       /* If op1 was placed in target, swap op0 and op1.  */
9679       if (target != op0 && target == op1)
9680 	std::swap (op0, op1);
9681 
9682       /* We generate better code and avoid problems with op1 mentioning
9683 	 target by forcing op1 into a pseudo if it isn't a constant.  */
9684       if (! CONSTANT_P (op1))
9685 	op1 = force_reg (mode, op1);
9686 
9687       {
9688 	enum rtx_code comparison_code;
9689 	rtx cmpop1 = op1;
9690 
9691 	if (code == MAX_EXPR)
9692 	  comparison_code = unsignedp ? GEU : GE;
9693 	else
9694 	  comparison_code = unsignedp ? LEU : LE;
9695 
9696 	/* Canonicalize to comparisons against 0.  */
9697 	if (op1 == const1_rtx)
9698 	  {
9699 	    /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
9700 	       or (a != 0 ? a : 1) for unsigned.
9701 	       For MIN we are safe converting (a <= 1 ? a : 1)
9702 	       into (a <= 0 ? a : 1)  */
9703 	    cmpop1 = const0_rtx;
9704 	    if (code == MAX_EXPR)
9705 	      comparison_code = unsignedp ? NE : GT;
9706 	  }
9707 	if (op1 == constm1_rtx && !unsignedp)
9708 	  {
9709 	    /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
9710 	       and (a <= -1 ? a : -1) into (a < 0 ? a : -1) */
9711 	    cmpop1 = const0_rtx;
9712 	    if (code == MIN_EXPR)
9713 	      comparison_code = LT;
9714 	  }
9715 
9716 	/* Use a conditional move if possible.  */
9717 	if (can_conditionally_move_p (mode))
9718 	  {
9719 	    rtx insn;
9720 
9721 	    start_sequence ();
9722 
9723 	    /* Try to emit the conditional move.  */
9724 	    insn = emit_conditional_move (target,
9725 					  { comparison_code,
9726 					    op0, cmpop1, mode },
9727 					  op0, op1, mode,
9728 					  unsignedp);
9729 
9730 	    /* If we could do the conditional move, emit the sequence,
9731 	       and return.  */
9732 	    if (insn)
9733 	      {
9734 		rtx_insn *seq = get_insns ();
9735 		end_sequence ();
9736 		emit_insn (seq);
9737 		return target;
9738 	      }
9739 
9740 	    /* Otherwise discard the sequence and fall back to code with
9741 	       branches.  */
9742 	    end_sequence ();
9743 	  }
9744 
9745 	if (target != op0)
9746 	  emit_move_insn (target, op0);
9747 
9748 	lab = gen_label_rtx ();
9749 	do_compare_rtx_and_jump (target, cmpop1, comparison_code,
9750 				 unsignedp, mode, NULL_RTX, NULL, lab,
9751 				 profile_probability::uninitialized ());
9752       }
9753       emit_move_insn (target, op1);
9754       emit_label (lab);
9755       return target;
9756 
9757     case BIT_NOT_EXPR:
9758       op0 = expand_expr (treeop0, subtarget,
9759 			 VOIDmode, EXPAND_NORMAL);
9760       if (modifier == EXPAND_STACK_PARM)
9761 	target = 0;
9762       /* In case we have to reduce the result to bit-field precision
9763 	 for an unsigned bit-field, expand this as an XOR with the proper
9764 	 constant instead.  */
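      /* For example, for a 3-bit unsigned bit-field x, ~x is computed as
	 x ^ 7, an XOR with a mask of TYPE_PRECISION low bits set, which
	 keeps the result within the field's precision.  */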
9765       if (reduce_bit_field && TYPE_UNSIGNED (type))
9766 	{
9767 	  int_mode = SCALAR_INT_TYPE_MODE (type);
9768 	  wide_int mask = wi::mask (TYPE_PRECISION (type),
9769 				    false, GET_MODE_PRECISION (int_mode));
9770 
9771 	  temp = expand_binop (int_mode, xor_optab, op0,
9772 			       immed_wide_int_const (mask, int_mode),
9773 			       target, 1, OPTAB_LIB_WIDEN);
9774 	}
9775       else
9776 	temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
9777       gcc_assert (temp);
9778       return temp;
9779 
9780       /* ??? Can optimize bitwise operations with one arg constant.
9781 	 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
9782 	 and (a bitwise1 b) bitwise2 b (etc)
9783 	 but that is probably not worthwhile.  */
9784 
9785     case BIT_AND_EXPR:
9786     case BIT_IOR_EXPR:
9787     case BIT_XOR_EXPR:
9788       goto binop;
9789 
9790     case LROTATE_EXPR:
9791     case RROTATE_EXPR:
9792       gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
9793 		  || type_has_mode_precision_p (type));
9794       /* fall through */
9795 
9796     case LSHIFT_EXPR:
9797     case RSHIFT_EXPR:
9798       {
9799 	/* If this is a fixed-point operation, then we cannot use the code
9800 	   below because "expand_shift" doesn't support sat/no-sat fixed-point
9801 	   shifts.  */
9802 	if (ALL_FIXED_POINT_MODE_P (mode))
9803 	  goto binop;
9804 
9805 	if (! safe_from_p (subtarget, treeop1, 1))
9806 	  subtarget = 0;
9807 	if (modifier == EXPAND_STACK_PARM)
9808 	  target = 0;
9809 	op0 = expand_expr (treeop0, subtarget,
9810 			   VOIDmode, EXPAND_NORMAL);
9811 
9812 	/* Left shift optimization when shifting across word_size boundary.
9813 
9814 	   If mode == GET_MODE_WIDER_MODE (word_mode), then normally
9815 	   there is no native instruction to support a left shift in
9816 	   this wide mode.  Consider the following scenario:
9817 
9818 	    Type A = (Type) B  << C
9819 
9820 	    |<		 T	    >|
9821 	    | dest_high  |  dest_low |
9822 
9823 			 | word_size |
9824 
9825 	   If the shift amount C causes B to be shifted across the word
9826 	   size boundary, i.e. part of B is shifted into the high half of
9827 	   the destination register while part of B remains in the low
9828 	   half, then GCC will use the following left shift expansion
9829 	   logic:
9830 
9831 	   1. Initialize dest_low to B.
9832 	   2. Initialize every bit of dest_high to the sign bit of B.
9833 	   3. Logically left shift dest_low by C bits to finalize dest_low.
9834 	      The value of dest_low before this shift is kept in a temp D.
9835 	   4. Logically left shift dest_high by C.
9836 	   5. Logically right shift D by (word_size - C).
9837 	   6. Or the result of 4 and 5 to finalize dest_high.
9838 
9839 	   However, by checking the gimple statements, if operand B
9840 	   comes from a sign extension, then we can simplify the above
9841 	   expansion logic into:
9842 
9843 	      1. dest_high = src_low >> (word_size - C).
9844 	      2. dest_low = src_low << C.
9845 
9846 	   We can use one arithmetic right shift to accomplish steps
9847 	   2, 4, 5 and 6 at once, thus reducing the number of steps
9848 	   needed from 6 to 2.
9849 
9850 	   The case is similar for zero extension, except that we
9851 	   initialize dest_high to zero rather than copies of the sign
9852 	   bit from B.  Furthermore, we need to use a logical right shift
9853 	   in this case.
9854 
9855 	   The choice of sign-extension versus zero-extension is
9856 	   determined entirely by whether or not B is signed and is
9857 	   independent of the current setting of unsignedp.  */
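	/* As an illustrative example (widths assumed): with word_mode DImode
	   and mode TImode, for (__int128) b << 40 where b is a signed 64-bit
	   value, the simplified expansion is
	     dest_high = b >> 24   (arithmetic shift, 24 = 64 - 40)
	     dest_low  = b << 40   (logical shift).  */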
9858 
9859 	temp = NULL_RTX;
9860 	if (code == LSHIFT_EXPR
9861 	    && target
9862 	    && REG_P (target)
9863 	    && GET_MODE_2XWIDER_MODE (word_mode).exists (&int_mode)
9864 	    && mode == int_mode
9865 	    && TREE_CONSTANT (treeop1)
9866 	    && TREE_CODE (treeop0) == SSA_NAME)
9867 	  {
9868 	    gimple *def = SSA_NAME_DEF_STMT (treeop0);
9869 	    if (is_gimple_assign (def)
9870 		&& gimple_assign_rhs_code (def) == NOP_EXPR)
9871 	      {
9872 		scalar_int_mode rmode = SCALAR_INT_TYPE_MODE
9873 		  (TREE_TYPE (gimple_assign_rhs1 (def)));
9874 
9875 		if (GET_MODE_SIZE (rmode) < GET_MODE_SIZE (int_mode)
9876 		    && TREE_INT_CST_LOW (treeop1) < GET_MODE_BITSIZE (word_mode)
9877 		    && ((TREE_INT_CST_LOW (treeop1) + GET_MODE_BITSIZE (rmode))
9878 			>= GET_MODE_BITSIZE (word_mode)))
9879 		  {
9880 		    rtx_insn *seq, *seq_old;
9881 		    poly_uint64 high_off = subreg_highpart_offset (word_mode,
9882 								   int_mode);
9883 		    bool extend_unsigned
9884 		      = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def)));
9885 		    rtx low = lowpart_subreg (word_mode, op0, int_mode);
9886 		    rtx dest_low = lowpart_subreg (word_mode, target, int_mode);
9887 		    rtx dest_high = simplify_gen_subreg (word_mode, target,
9888 							 int_mode, high_off);
9889 		    HOST_WIDE_INT ramount = (BITS_PER_WORD
9890 					     - TREE_INT_CST_LOW (treeop1));
9891 		    tree rshift = build_int_cst (TREE_TYPE (treeop1), ramount);
9892 
9893 		    start_sequence ();
9894 		    /* dest_high = src_low >> (word_size - C).  */
9895 		    temp = expand_variable_shift (RSHIFT_EXPR, word_mode, low,
9896 						  rshift, dest_high,
9897 						  extend_unsigned);
9898 		    if (temp != dest_high)
9899 		      emit_move_insn (dest_high, temp);
9900 
9901 		    /* dest_low = src_low << C.  */
9902 		    temp = expand_variable_shift (LSHIFT_EXPR, word_mode, low,
9903 						  treeop1, dest_low, unsignedp);
9904 		    if (temp != dest_low)
9905 		      emit_move_insn (dest_low, temp);
9906 
9907 		    seq = get_insns ();
9908 		    end_sequence ();
9909 		    temp = target;
9910 
9911 		    if (have_insn_for (ASHIFT, int_mode))
9912 		      {
9913 			bool speed_p = optimize_insn_for_speed_p ();
9914 			start_sequence ();
9915 			rtx ret_old = expand_variable_shift (code, int_mode,
9916 							     op0, treeop1,
9917 							     target,
9918 							     unsignedp);
9919 
9920 			seq_old = get_insns ();
9921 			end_sequence ();
9922 			if (seq_cost (seq, speed_p)
9923 			    >= seq_cost (seq_old, speed_p))
9924 			  {
9925 			    seq = seq_old;
9926 			    temp = ret_old;
9927 			  }
9928 		      }
9929 		    emit_insn (seq);
9930 		  }
9931 	      }
9932 	  }
9933 
9934 	if (temp == NULL_RTX)
9935 	  temp = expand_variable_shift (code, mode, op0, treeop1, target,
9936 					unsignedp);
9937 	if (code == LSHIFT_EXPR)
9938 	  temp = REDUCE_BIT_FIELD (temp);
9939 	return temp;
9940       }
9941 
9942       /* Could determine the answer when only additive constants differ.  Also,
9943 	 the addition of one can be handled by changing the condition.  */
9944     case LT_EXPR:
9945     case LE_EXPR:
9946     case GT_EXPR:
9947     case GE_EXPR:
9948     case EQ_EXPR:
9949     case NE_EXPR:
9950     case UNORDERED_EXPR:
9951     case ORDERED_EXPR:
9952     case UNLT_EXPR:
9953     case UNLE_EXPR:
9954     case UNGT_EXPR:
9955     case UNGE_EXPR:
9956     case UNEQ_EXPR:
9957     case LTGT_EXPR:
9958       {
9959 	temp = do_store_flag (ops,
9960 			      modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
9961 			      tmode != VOIDmode ? tmode : mode);
9962 	if (temp)
9963 	  return temp;
9964 
9965 	/* Use a compare and a jump for BLKmode comparisons, or for function
9966 	   type comparisons if have_canonicalize_funcptr_for_compare.  */
9967 
9968 	if ((target == 0
9969 	     || modifier == EXPAND_STACK_PARM
9970 	     || ! safe_from_p (target, treeop0, 1)
9971 	     || ! safe_from_p (target, treeop1, 1)
9972 	     /* Make sure we don't have a hard reg (such as function's return
9973 		value) live across basic blocks, if not optimizing.  */
9974 	     || (!optimize && REG_P (target)
9975 		 && REGNO (target) < FIRST_PSEUDO_REGISTER)))
9976 	  target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
9977 
9978 	emit_move_insn (target, const0_rtx);
9979 
9980 	rtx_code_label *lab1 = gen_label_rtx ();
9981 	jumpifnot_1 (code, treeop0, treeop1, lab1,
9982 		     profile_probability::uninitialized ());
9983 
9984 	if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
9985 	  emit_move_insn (target, constm1_rtx);
9986 	else
9987 	  emit_move_insn (target, const1_rtx);
9988 
9989 	emit_label (lab1);
9990 	return target;
9991       }
9992     case COMPLEX_EXPR:
9993       /* Get the rtx code of the operands.  */
9994       op0 = expand_normal (treeop0);
9995       op1 = expand_normal (treeop1);
9996 
9997       if (!target)
9998 	target = gen_reg_rtx (TYPE_MODE (type));
9999       else
10000 	/* If target overlaps with op1, then either we need to force
10001 	   op1 into a pseudo (if target also overlaps with op0),
10002 	   or write the complex parts in reverse order.  */
10003 	switch (GET_CODE (target))
10004 	  {
10005 	  case CONCAT:
10006 	    if (reg_overlap_mentioned_p (XEXP (target, 0), op1))
10007 	      {
10008 		if (reg_overlap_mentioned_p (XEXP (target, 1), op0))
10009 		  {
10010 		  complex_expr_force_op1:
10011 		    temp = gen_reg_rtx (GET_MODE_INNER (GET_MODE (target)));
10012 		    emit_move_insn (temp, op1);
10013 		    op1 = temp;
10014 		    break;
10015 		  }
10016 	      complex_expr_swap_order:
10017 		/* Move the imaginary (op1) and real (op0) parts to their
10018 		   location.  */
10019 		write_complex_part (target, op1, true);
10020 		write_complex_part (target, op0, false);
10021 
10022 		return target;
10023 	      }
10024 	    break;
10025 	  case MEM:
10026 	    temp = adjust_address_nv (target,
10027 				      GET_MODE_INNER (GET_MODE (target)), 0);
10028 	    if (reg_overlap_mentioned_p (temp, op1))
10029 	      {
10030 		scalar_mode imode = GET_MODE_INNER (GET_MODE (target));
10031 		temp = adjust_address_nv (target, imode,
10032 					  GET_MODE_SIZE (imode));
10033 		if (reg_overlap_mentioned_p (temp, op0))
10034 		  goto complex_expr_force_op1;
10035 		goto complex_expr_swap_order;
10036 	      }
10037 	    break;
10038 	  default:
10039 	    if (reg_overlap_mentioned_p (target, op1))
10040 	      {
10041 		if (reg_overlap_mentioned_p (target, op0))
10042 		  goto complex_expr_force_op1;
10043 		goto complex_expr_swap_order;
10044 	      }
10045 	    break;
10046 	  }
10047 
10048       /* Move the real (op0) and imaginary (op1) parts to their location.  */
10049       write_complex_part (target, op0, false);
10050       write_complex_part (target, op1, true);
10051 
10052       return target;
10053 
10054     case WIDEN_SUM_EXPR:
10055       {
10056         tree oprnd0 = treeop0;
10057         tree oprnd1 = treeop1;
10058 
10059         expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
10060         target = expand_widen_pattern_expr (ops, op0, NULL_RTX, op1,
10061                                             target, unsignedp);
10062         return target;
10063       }
10064 
10065     case VEC_UNPACK_HI_EXPR:
10066     case VEC_UNPACK_LO_EXPR:
10067     case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
10068     case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
10069       {
10070 	op0 = expand_normal (treeop0);
10071 	temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
10072 					  target, unsignedp);
10073 	gcc_assert (temp);
10074 	return temp;
10075       }
10076 
10077     case VEC_UNPACK_FLOAT_HI_EXPR:
10078     case VEC_UNPACK_FLOAT_LO_EXPR:
10079       {
10080 	op0 = expand_normal (treeop0);
10081 	/* The signedness is determined from input operand.  */
10082 	temp = expand_widen_pattern_expr
10083 	  (ops, op0, NULL_RTX, NULL_RTX,
10084 	   target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
10085 
10086 	gcc_assert (temp);
10087 	return temp;
10088       }
10089 
10090     case VEC_WIDEN_PLUS_HI_EXPR:
10091     case VEC_WIDEN_PLUS_LO_EXPR:
10092     case VEC_WIDEN_MINUS_HI_EXPR:
10093     case VEC_WIDEN_MINUS_LO_EXPR:
10094     case VEC_WIDEN_MULT_HI_EXPR:
10095     case VEC_WIDEN_MULT_LO_EXPR:
10096     case VEC_WIDEN_MULT_EVEN_EXPR:
10097     case VEC_WIDEN_MULT_ODD_EXPR:
10098     case VEC_WIDEN_LSHIFT_HI_EXPR:
10099     case VEC_WIDEN_LSHIFT_LO_EXPR:
10100       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
10101       target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
10102 					  target, unsignedp);
10103       gcc_assert (target);
10104       return target;
10105 
10106     case VEC_PACK_SAT_EXPR:
10107     case VEC_PACK_FIX_TRUNC_EXPR:
10108       mode = TYPE_MODE (TREE_TYPE (treeop0));
10109       subtarget = NULL_RTX;
10110       goto binop;
10111 
10112     case VEC_PACK_TRUNC_EXPR:
10113       if (VECTOR_BOOLEAN_TYPE_P (type)
10114 	  && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (treeop0))
10115 	  && mode == TYPE_MODE (TREE_TYPE (treeop0))
10116 	  && SCALAR_INT_MODE_P (mode))
10117 	{
10118 	  class expand_operand eops[4];
10119 	  machine_mode imode = TYPE_MODE (TREE_TYPE (treeop0));
10120 	  expand_operands (treeop0, treeop1,
10121 			   subtarget, &op0, &op1, EXPAND_NORMAL);
10122 	  this_optab = vec_pack_sbool_trunc_optab;
10123 	  enum insn_code icode = optab_handler (this_optab, imode);
10124 	  create_output_operand (&eops[0], target, mode);
10125 	  create_convert_operand_from (&eops[1], op0, imode, false);
10126 	  create_convert_operand_from (&eops[2], op1, imode, false);
10127 	  temp = GEN_INT (TYPE_VECTOR_SUBPARTS (type).to_constant ());
10128 	  create_input_operand (&eops[3], temp, imode);
10129 	  expand_insn (icode, 4, eops);
10130 	  return eops[0].value;
10131 	}
10132       mode = TYPE_MODE (TREE_TYPE (treeop0));
10133       subtarget = NULL_RTX;
10134       goto binop;
10135 
10136     case VEC_PACK_FLOAT_EXPR:
10137       mode = TYPE_MODE (TREE_TYPE (treeop0));
10138       expand_operands (treeop0, treeop1,
10139 		       subtarget, &op0, &op1, EXPAND_NORMAL);
10140       this_optab = optab_for_tree_code (code, TREE_TYPE (treeop0),
10141 					optab_default);
10142       target = expand_binop (mode, this_optab, op0, op1, target,
10143 			     TYPE_UNSIGNED (TREE_TYPE (treeop0)),
10144 			     OPTAB_LIB_WIDEN);
10145       gcc_assert (target);
10146       return target;
10147 
10148     case VEC_PERM_EXPR:
10149       {
10150 	expand_operands (treeop0, treeop1, target, &op0, &op1, EXPAND_NORMAL);
10151 	vec_perm_builder sel;
10152 	if (TREE_CODE (treeop2) == VECTOR_CST
10153 	    && tree_to_vec_perm_builder (&sel, treeop2))
10154 	  {
10155 	    machine_mode sel_mode = TYPE_MODE (TREE_TYPE (treeop2));
10156 	    temp = expand_vec_perm_const (mode, op0, op1, sel,
10157 					  sel_mode, target);
10158 	  }
10159 	else
10160 	  {
10161 	    op2 = expand_normal (treeop2);
10162 	    temp = expand_vec_perm_var (mode, op0, op1, op2, target);
10163 	  }
10164 	gcc_assert (temp);
10165 	return temp;
10166       }
10167 
10168     case DOT_PROD_EXPR:
10169       {
10170 	tree oprnd0 = treeop0;
10171 	tree oprnd1 = treeop1;
10172 	tree oprnd2 = treeop2;
10173 
10174 	expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
10175 	op2 = expand_normal (oprnd2);
10176 	target = expand_widen_pattern_expr (ops, op0, op1, op2,
10177 					    target, unsignedp);
10178 	return target;
10179       }
10180 
10181       case SAD_EXPR:
10182       {
10183 	tree oprnd0 = treeop0;
10184 	tree oprnd1 = treeop1;
10185 	tree oprnd2 = treeop2;
10186 
10187 	expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
10188 	op2 = expand_normal (oprnd2);
10189 	target = expand_widen_pattern_expr (ops, op0, op1, op2,
10190 					    target, unsignedp);
10191 	return target;
10192       }
10193 
10194     case REALIGN_LOAD_EXPR:
10195       {
10196         tree oprnd0 = treeop0;
10197         tree oprnd1 = treeop1;
10198         tree oprnd2 = treeop2;
10199 
10200         this_optab = optab_for_tree_code (code, type, optab_default);
10201         expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
10202         op2 = expand_normal (oprnd2);
10203         temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
10204 				  target, unsignedp);
10205         gcc_assert (temp);
10206         return temp;
10207       }
10208 
10209     case COND_EXPR:
10210       {
10211 	/* A COND_EXPR with its type being VOID_TYPE represents a
10212 	   conditional jump and is handled in
10213 	   expand_gimple_cond_expr.  */
10214 	gcc_assert (!VOID_TYPE_P (type));
10215 
10216 	/* Note that COND_EXPRs whose type is a structure or union
10217 	   are required to be constructed to contain assignments of
10218 	   a temporary variable, so that we can evaluate them here
10219 	   for side effect only.  If type is void, we must do likewise.  */
10220 
10221 	gcc_assert (!TREE_ADDRESSABLE (type)
10222 		    && !ignore
10223 		    && TREE_TYPE (treeop1) != void_type_node
10224 		    && TREE_TYPE (treeop2) != void_type_node);
10225 
10226 	temp = expand_cond_expr_using_cmove (treeop0, treeop1, treeop2);
10227 	if (temp)
10228 	  return temp;
10229 
10230 	/* If we are not to produce a result, we have no target.  Otherwise,
10231 	   if a target was specified use it; it will not be used as an
10232 	   intermediate target unless it is safe.  If no target, use a
10233 	   temporary.  */
10234 
10235 	if (modifier != EXPAND_STACK_PARM
10236 	    && original_target
10237 	    && safe_from_p (original_target, treeop0, 1)
10238 	    && GET_MODE (original_target) == mode
10239 	    && !MEM_P (original_target))
10240 	  temp = original_target;
10241 	else
10242 	  temp = assign_temp (type, 0, 1);
10243 
10244 	do_pending_stack_adjust ();
10245 	NO_DEFER_POP;
10246 	rtx_code_label *lab0 = gen_label_rtx ();
10247 	rtx_code_label *lab1 = gen_label_rtx ();
10248 	jumpifnot (treeop0, lab0,
10249 		   profile_probability::uninitialized ());
10250 	store_expr (treeop1, temp,
10251 		    modifier == EXPAND_STACK_PARM,
10252 		    false, false);
10253 
10254 	emit_jump_insn (targetm.gen_jump (lab1));
10255 	emit_barrier ();
10256 	emit_label (lab0);
10257 	store_expr (treeop2, temp,
10258 		    modifier == EXPAND_STACK_PARM,
10259 		    false, false);
10260 
10261 	emit_label (lab1);
10262 	OK_DEFER_POP;
10263 	return temp;
10264       }
10265 
10266     case VEC_DUPLICATE_EXPR:
10267       op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
10268       target = expand_vector_broadcast (mode, op0);
10269       gcc_assert (target);
10270       return target;
10271 
10272     case VEC_SERIES_EXPR:
10273       expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, modifier);
10274       return expand_vec_series_expr (mode, op0, op1, target);
10275 
10276     case BIT_INSERT_EXPR:
10277       {
10278 	unsigned bitpos = tree_to_uhwi (treeop2);
10279 	unsigned bitsize;
10280 	if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1)))
10281 	  bitsize = TYPE_PRECISION (TREE_TYPE (treeop1));
10282 	else
10283 	  bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1)));
10284 	op0 = expand_normal (treeop0);
10285 	op1 = expand_normal (treeop1);
10286 	rtx dst = gen_reg_rtx (mode);
10287 	emit_move_insn (dst, op0);
10288 	store_bit_field (dst, bitsize, bitpos, 0, 0,
10289 			 TYPE_MODE (TREE_TYPE (treeop1)), op1, false);
10290 	return dst;
10291       }
10292 
10293     default:
10294       gcc_unreachable ();
10295     }
10296 
10297   /* Here to do an ordinary binary operator.  */
10298  binop:
10299   expand_operands (treeop0, treeop1,
10300 		   subtarget, &op0, &op1, EXPAND_NORMAL);
10301  binop2:
10302   this_optab = optab_for_tree_code (code, type, optab_default);
10303  binop3:
10304   if (modifier == EXPAND_STACK_PARM)
10305     target = 0;
10306   temp = expand_binop (mode, this_optab, op0, op1, target,
10307 		       unsignedp, OPTAB_LIB_WIDEN);
10308   gcc_assert (temp);
10309   /* Bitwise operations do not need bitfield reduction as we expect their
10310      operands being properly truncated.  */
10311   if (code == BIT_XOR_EXPR
10312       || code == BIT_AND_EXPR
10313       || code == BIT_IOR_EXPR)
10314     return temp;
10315   return REDUCE_BIT_FIELD (temp);
10316 }
10317 #undef REDUCE_BIT_FIELD
10318 
10319 
10320 /* Return TRUE if expression STMT is suitable for replacement.
10321    Never consider memory loads as replaceable, because those don't ever lead
10322    into constant expressions.  */
10323 
10324 static bool
10325 stmt_is_replaceable_p (gimple *stmt)
10326 {
10327   if (ssa_is_replaceable_p (stmt))
10328     {
10329       /* Don't move around loads.  */
10330       if (!gimple_assign_single_p (stmt)
10331 	  || is_gimple_val (gimple_assign_rhs1 (stmt)))
10332 	return true;
10333     }
10334   return false;
10335 }
10336 
10337 rtx
10338 expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
10339 		    enum expand_modifier modifier, rtx *alt_rtl,
10340 		    bool inner_reference_p)
10341 {
10342   rtx op0, op1, temp, decl_rtl;
10343   tree type;
10344   int unsignedp;
10345   machine_mode mode, dmode;
10346   enum tree_code code = TREE_CODE (exp);
10347   rtx subtarget, original_target;
10348   int ignore;
10349   bool reduce_bit_field;
10350   location_t loc = EXPR_LOCATION (exp);
10351   struct separate_ops ops;
10352   tree treeop0, treeop1, treeop2;
10353   tree ssa_name = NULL_TREE;
10354   gimple *g;
10355 
10356   type = TREE_TYPE (exp);
10357   mode = TYPE_MODE (type);
10358   unsignedp = TYPE_UNSIGNED (type);
10359 
10360   treeop0 = treeop1 = treeop2 = NULL_TREE;
10361   if (!VL_EXP_CLASS_P (exp))
10362     switch (TREE_CODE_LENGTH (code))
10363       {
10364 	default:
10365 	case 3: treeop2 = TREE_OPERAND (exp, 2); /* FALLTHRU */
10366 	case 2: treeop1 = TREE_OPERAND (exp, 1); /* FALLTHRU */
10367 	case 1: treeop0 = TREE_OPERAND (exp, 0); /* FALLTHRU */
10368 	case 0: break;
10369       }
10370   ops.code = code;
10371   ops.type = type;
10372   ops.op0 = treeop0;
10373   ops.op1 = treeop1;
10374   ops.op2 = treeop2;
10375   ops.location = loc;
10376 
10377   ignore = (target == const0_rtx
10378 	    || ((CONVERT_EXPR_CODE_P (code)
10379 		 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
10380 		&& TREE_CODE (type) == VOID_TYPE));
10381 
10382   /* An operation in what may be a bit-field type needs the
10383      result to be reduced to the precision of the bit-field type,
10384      which is narrower than that of the type's mode.  */
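  /* For example, an unsigned 3-bit bit-field type may have QImode; an
     addition is carried out in QImode and the result must then be masked
     back down to 3 bits to be a valid value of the type.  */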
10385   reduce_bit_field = (!ignore
10386 		      && INTEGRAL_TYPE_P (type)
10387 		      && !type_has_mode_precision_p (type));
10388 
10389   /* If we are going to ignore this result, we need only do something
10390      if there is a side-effect somewhere in the expression.  If there
10391      is, short-circuit the most common cases here.  Note that we must
10392      not call expand_expr with anything but const0_rtx in case this
10393      is an initial expansion of a size that contains a PLACEHOLDER_EXPR.  */
10394 
10395   if (ignore)
10396     {
10397       if (! TREE_SIDE_EFFECTS (exp))
10398 	return const0_rtx;
10399 
10400       /* Ensure we reference a volatile object even if value is ignored, but
10401 	 don't do this if all we are doing is taking its address.  */
10402       if (TREE_THIS_VOLATILE (exp)
10403 	  && TREE_CODE (exp) != FUNCTION_DECL
10404 	  && mode != VOIDmode && mode != BLKmode
10405 	  && modifier != EXPAND_CONST_ADDRESS)
10406 	{
10407 	  temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
10408 	  if (MEM_P (temp))
10409 	    copy_to_reg (temp);
10410 	  return const0_rtx;
10411 	}
10412 
10413       if (TREE_CODE_CLASS (code) == tcc_unary
10414 	  || code == BIT_FIELD_REF
10415 	  || code == COMPONENT_REF
10416 	  || code == INDIRECT_REF)
10417 	return expand_expr (treeop0, const0_rtx, VOIDmode,
10418 			    modifier);
10419 
10420       else if (TREE_CODE_CLASS (code) == tcc_binary
10421 	       || TREE_CODE_CLASS (code) == tcc_comparison
10422 	       || code == ARRAY_REF || code == ARRAY_RANGE_REF)
10423 	{
10424 	  expand_expr (treeop0, const0_rtx, VOIDmode, modifier);
10425 	  expand_expr (treeop1, const0_rtx, VOIDmode, modifier);
10426 	  return const0_rtx;
10427 	}
10428 
10429       target = 0;
10430     }
10431 
10432   if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
10433     target = 0;
10434 
10435   /* Use subtarget as the target for operand 0 of a binary operation.  */
10436   subtarget = get_subtarget (target);
10437   original_target = target;
10438 
10439   switch (code)
10440     {
10441     case LABEL_DECL:
10442       {
10443 	tree function = decl_function_context (exp);
10444 
10445 	temp = label_rtx (exp);
10446 	temp = gen_rtx_LABEL_REF (Pmode, temp);
10447 
10448 	if (function != current_function_decl
10449 	    && function != 0)
10450 	  LABEL_REF_NONLOCAL_P (temp) = 1;
10451 
10452 	temp = gen_rtx_MEM (FUNCTION_MODE, temp);
10453 	return temp;
10454       }
10455 
10456     case SSA_NAME:
10457       /* ??? ivopts calls the expander without any preparation from
10458          out-of-ssa.  So fake instructions as if this were an access to the
10459 	 base variable.  This unnecessarily allocates a pseudo; see whether we
10460 	 can reuse it if partition base vars have it set already.  */
10461       if (!currently_expanding_to_rtl)
10462 	{
10463 	  tree var = SSA_NAME_VAR (exp);
10464 	  if (var && DECL_RTL_SET_P (var))
10465 	    return DECL_RTL (var);
10466 	  return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp)),
10467 			      LAST_VIRTUAL_REGISTER + 1);
10468 	}
10469 
10470       g = get_gimple_for_ssa_name (exp);
10471       /* For EXPAND_INITIALIZER try harder to get something simpler.  */
10472       if (g == NULL
10473 	  && modifier == EXPAND_INITIALIZER
10474 	  && !SSA_NAME_IS_DEFAULT_DEF (exp)
10475 	  && (optimize || !SSA_NAME_VAR (exp)
10476 	      || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
10477 	  && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
10478 	g = SSA_NAME_DEF_STMT (exp);
10479       if (g)
10480 	{
10481 	  rtx r;
10482 	  location_t saved_loc = curr_insn_location ();
10483 	  loc = gimple_location (g);
10484 	  if (loc != UNKNOWN_LOCATION)
10485 	    set_curr_insn_location (loc);
10486 	  ops.code = gimple_assign_rhs_code (g);
10487           switch (get_gimple_rhs_class (ops.code))
10488 	    {
10489 	    case GIMPLE_TERNARY_RHS:
10490 	      ops.op2 = gimple_assign_rhs3 (g);
10491 	      /* Fallthru */
10492 	    case GIMPLE_BINARY_RHS:
10493 	      ops.op1 = gimple_assign_rhs2 (g);
10494 
10495 	      /* Try to expand a conditional compare.  */
10496 	      if (targetm.gen_ccmp_first)
10497 		{
10498 		  gcc_checking_assert (targetm.gen_ccmp_next != NULL);
10499 		  r = expand_ccmp_expr (g, mode);
10500 		  if (r)
10501 		    break;
10502 		}
10503 	      /* Fallthru */
10504 	    case GIMPLE_UNARY_RHS:
10505 	      ops.op0 = gimple_assign_rhs1 (g);
10506 	      ops.type = TREE_TYPE (gimple_assign_lhs (g));
10507 	      ops.location = loc;
10508 	      r = expand_expr_real_2 (&ops, target, tmode, modifier);
10509 	      break;
10510 	    case GIMPLE_SINGLE_RHS:
10511 	      {
10512 		r = expand_expr_real (gimple_assign_rhs1 (g), target,
10513 				      tmode, modifier, alt_rtl,
10514 				      inner_reference_p);
10515 		break;
10516 	      }
10517 	    default:
10518 	      gcc_unreachable ();
10519 	    }
10520 	  set_curr_insn_location (saved_loc);
10521 	  if (REG_P (r) && !REG_EXPR (r))
10522 	    set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp), r);
10523 	  return r;
10524 	}
10525 
10526       ssa_name = exp;
10527       decl_rtl = get_rtx_for_ssa_name (ssa_name);
10528       exp = SSA_NAME_VAR (ssa_name);
10529       goto expand_decl_rtl;
10530 
10531     case VAR_DECL:
10532       /* Allow accel compiler to handle variables that require special
10533 	 treatment, e.g. if they have been modified in some way earlier in
10534 	 compilation by the adjust_private_decl OpenACC hook.  */
10535       if (flag_openacc && targetm.goacc.expand_var_decl)
10536 	{
10537 	  temp = targetm.goacc.expand_var_decl (exp);
10538 	  if (temp)
10539 	    return temp;
10540 	}
10541       /* ... fall through ...  */
10542 
10543     case PARM_DECL:
10544       /* If a static var's type was incomplete when the decl was written,
10545 	 but the type is complete now, lay out the decl now.  */
10546       if (DECL_SIZE (exp) == 0
10547 	  && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp))
10548 	  && (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
10549 	layout_decl (exp, 0);
10550 
10551       /* fall through */
10552 
10553     case FUNCTION_DECL:
10554     case RESULT_DECL:
10555       decl_rtl = DECL_RTL (exp);
10556     expand_decl_rtl:
10557       gcc_assert (decl_rtl);
10558 
10559       /* DECL_MODE might change when TYPE_MODE depends on attribute target
10560 	 settings for VECTOR_TYPE_P that might switch for the function.  */
10561       if (currently_expanding_to_rtl
10562 	  && code == VAR_DECL && MEM_P (decl_rtl)
10563 	  && VECTOR_TYPE_P (type) && exp && DECL_MODE (exp) != mode)
10564 	decl_rtl = change_address (decl_rtl, TYPE_MODE (type), 0);
10565       else
10566 	decl_rtl = copy_rtx (decl_rtl);
10567 
10568       /* Record writes to register variables.  */
10569       if (modifier == EXPAND_WRITE
10570 	  && REG_P (decl_rtl)
10571 	  && HARD_REGISTER_P (decl_rtl))
10572         add_to_hard_reg_set (&crtl->asm_clobbers,
10573 			     GET_MODE (decl_rtl), REGNO (decl_rtl));
10574 
10575       /* Ensure the variable is marked as used even if it doesn't go through
10576 	 a parser.  If it hasn't been used yet, write out an external
10577 	 definition.  */
10578       if (exp)
10579 	TREE_USED (exp) = 1;
10580 
10581       /* Show we haven't gotten RTL for this yet.  */
10582       temp = 0;
10583 
10584       /* Variables inherited from containing functions should have
10585 	 been lowered by this point.  */
10586       if (exp)
10587 	{
10588 	  tree context = decl_function_context (exp);
10589 	  gcc_assert (SCOPE_FILE_SCOPE_P (context)
10590 		      || context == current_function_decl
10591 		      || TREE_STATIC (exp)
10592 		      || DECL_EXTERNAL (exp)
10593 		      /* ??? C++ creates functions that are not
10594 			 TREE_STATIC.  */
10595 		      || TREE_CODE (exp) == FUNCTION_DECL);
10596 	}
10597 
10598       /* This is the case of an array whose size is to be determined
10599 	 from its initializer, while the initializer is still being parsed.
10600 	 ??? We aren't parsing while expanding anymore.  */
10601 
10602       if (MEM_P (decl_rtl) && REG_P (XEXP (decl_rtl, 0)))
10603 	temp = validize_mem (decl_rtl);
10604 
10605       /* If DECL_RTL is memory, we are in the normal case and the
10606 	 address is not valid, get the address into a register.  */
10607 
10608       else if (MEM_P (decl_rtl) && modifier != EXPAND_INITIALIZER)
10609 	{
10610 	  if (alt_rtl)
10611 	    *alt_rtl = decl_rtl;
10612 	  decl_rtl = use_anchored_address (decl_rtl);
10613 	  if (modifier != EXPAND_CONST_ADDRESS
10614 	      && modifier != EXPAND_SUM
10615 	      && !memory_address_addr_space_p (exp ? DECL_MODE (exp)
10616 					       : GET_MODE (decl_rtl),
10617 					       XEXP (decl_rtl, 0),
10618 					       MEM_ADDR_SPACE (decl_rtl)))
10619 	    temp = replace_equiv_address (decl_rtl,
10620 					  copy_rtx (XEXP (decl_rtl, 0)));
10621 	}
10622 
10623       /* If we got something, return it.  But first, set the alignment
10624 	 if the address is a register.  */
10625       if (temp != 0)
10626 	{
10627 	  if (exp && MEM_P (temp) && REG_P (XEXP (temp, 0)))
10628 	    mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
10629 	}
10630       else if (MEM_P (decl_rtl))
10631 	temp = decl_rtl;
10632 
10633       if (temp != 0)
10634 	{
10635 	  if (MEM_P (temp)
10636 	      && modifier != EXPAND_WRITE
10637 	      && modifier != EXPAND_MEMORY
10638 	      && modifier != EXPAND_INITIALIZER
10639 	      && modifier != EXPAND_CONST_ADDRESS
10640 	      && modifier != EXPAND_SUM
10641 	      && !inner_reference_p
10642 	      && mode != BLKmode
10643 	      && MEM_ALIGN (temp) < GET_MODE_ALIGNMENT (mode))
10644 	    temp = expand_misaligned_mem_ref (temp, mode, unsignedp,
10645 					      MEM_ALIGN (temp), NULL_RTX, NULL);
10646 
10647 	  return temp;
10648 	}
10649 
10650       if (exp)
10651 	dmode = DECL_MODE (exp);
10652       else
10653 	dmode = TYPE_MODE (TREE_TYPE (ssa_name));
10654 
10655       /* If the mode of DECL_RTL does not match that of the decl,
10656 	 there are two cases: we are dealing with a BLKmode value
10657 	 that is returned in a register, or we are dealing with
10658 	 a promoted value.  In the latter case, return a SUBREG
10659 	 of the wanted mode, but mark it so that we know that it
10660 	 was already extended.  */
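      /* For instance, on targets whose ABI promotes SImode values to
	 DImode registers, an "int" variable can have DECL_RTL in DImode;
	 the SUBREG_PROMOTED_* markings applied below record that the
	 upper bits are already correctly extended, so later code can
	 avoid emitting a redundant extension.  */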
10661       if (REG_P (decl_rtl)
10662 	  && dmode != BLKmode
10663 	  && GET_MODE (decl_rtl) != dmode)
10664 	{
10665 	  machine_mode pmode;
10666 
10667 	  /* Get the signedness to be used for this variable.  Ensure we get
10668 	     the same mode we got when the variable was declared.  */
10669 	  if (code != SSA_NAME)
10670 	    pmode = promote_decl_mode (exp, &unsignedp);
10671 	  else if ((g = SSA_NAME_DEF_STMT (ssa_name))
10672 		   && gimple_code (g) == GIMPLE_CALL
10673 		   && !gimple_call_internal_p (g))
10674 	    pmode = promote_function_mode (type, mode, &unsignedp,
10675 					   gimple_call_fntype (g),
10676 					   2);
10677 	  else
10678 	    pmode = promote_ssa_mode (ssa_name, &unsignedp);
10679 	  gcc_assert (GET_MODE (decl_rtl) == pmode);
10680 
10681 	  temp = gen_lowpart_SUBREG (mode, decl_rtl);
10682 	  SUBREG_PROMOTED_VAR_P (temp) = 1;
10683 	  SUBREG_PROMOTED_SET (temp, unsignedp);
10684 	  return temp;
10685 	}
10686 
10687       return decl_rtl;
10688 
10689     case INTEGER_CST:
10690       {
10691 	/* Given that TYPE_PRECISION (type) is not always equal to
10692 	   GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
10693 	   the former to the latter according to the signedness of the
10694 	   type.  */
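	/* For example, a bit-field type of width 3 has TYPE_PRECISION 3
	   but is given QImode, whose precision is 8; the constant is
	   extended here from 3 to 8 bits according to the signedness of
	   the type before being turned into an RTL constant.  */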
10695 	scalar_int_mode int_mode = SCALAR_INT_TYPE_MODE (type);
10696 	temp = immed_wide_int_const
10697 	  (wi::to_wide (exp, GET_MODE_PRECISION (int_mode)), int_mode);
10698 	return temp;
10699       }
10700 
10701     case VECTOR_CST:
10702       {
10703 	tree tmp = NULL_TREE;
10704 	if (VECTOR_MODE_P (mode))
10705 	  return const_vector_from_tree (exp);
10706 	scalar_int_mode int_mode;
10707 	if (is_int_mode (mode, &int_mode))
10708 	  {
10709 	    tree type_for_mode = lang_hooks.types.type_for_mode (int_mode, 1);
10710 	    if (type_for_mode)
10711 	      tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR,
10712 				    type_for_mode, exp);
10713 	  }
10714 	if (!tmp)
10715 	  {
10716 	    vec<constructor_elt, va_gc> *v;
10717 	    /* Constructors need to be fixed-length.  FIXME.  */
10718 	    unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
10719 	    vec_alloc (v, nunits);
10720 	    for (unsigned int i = 0; i < nunits; ++i)
10721 	      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
10722 	    tmp = build_constructor (type, v);
10723 	  }
10724 	return expand_expr (tmp, ignore ? const0_rtx : target,
10725 			    tmode, modifier);
10726       }
10727 
10728     case CONST_DECL:
10729       if (modifier == EXPAND_WRITE)
10730 	{
10731 	  /* Writing into CONST_DECL is always invalid, but handle it
10732 	     gracefully.  */
10733 	  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
10734 	  scalar_int_mode address_mode = targetm.addr_space.address_mode (as);
10735 	  op0 = expand_expr_addr_expr_1 (exp, NULL_RTX, address_mode,
10736 					 EXPAND_NORMAL, as);
10737 	  op0 = memory_address_addr_space (mode, op0, as);
10738 	  temp = gen_rtx_MEM (mode, op0);
10739 	  set_mem_addr_space (temp, as);
10740 	  return temp;
10741 	}
10742       return expand_expr (DECL_INITIAL (exp), target, VOIDmode, modifier);
10743 
10744     case REAL_CST:
10745       /* If optimized, generate immediate CONST_DOUBLE
10746 	 which will be turned into memory by reload if necessary.
10747 
10748 	 We used to force a register so that loop.c could see it.  But
10749 	 this does not allow gen_* patterns to perform optimizations with
10750 	 the constants.  It also produces two insns in cases like "x = 1.0;".
10751 	 On most machines, floating-point constants are not permitted in
10752 	 many insns, so we'd end up copying it to a register in any case.
10753 
10754 	 Now, we do the copying in expand_binop, if appropriate.  */
10755       return const_double_from_real_value (TREE_REAL_CST (exp),
10756 					   TYPE_MODE (TREE_TYPE (exp)));
10757 
10758     case FIXED_CST:
10759       return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp),
10760 					   TYPE_MODE (TREE_TYPE (exp)));
10761 
10762     case COMPLEX_CST:
10763       /* Handle evaluating a complex constant in a CONCAT target.  */
10764       if (original_target && GET_CODE (original_target) == CONCAT)
10765 	{
10766 	  rtx rtarg, itarg;
10767 
10768 	  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
10769 	  rtarg = XEXP (original_target, 0);
10770 	  itarg = XEXP (original_target, 1);
10771 
10772 	  /* Move the real and imaginary parts separately.  */
10773 	  op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, EXPAND_NORMAL);
10774 	  op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, EXPAND_NORMAL);
10775 
10776 	  if (op0 != rtarg)
10777 	    emit_move_insn (rtarg, op0);
10778 	  if (op1 != itarg)
10779 	    emit_move_insn (itarg, op1);
10780 
10781 	  return original_target;
10782 	}
10783 
10784       /* fall through */
10785 
10786     case STRING_CST:
10787       temp = expand_expr_constant (exp, 1, modifier);
10788 
10789       /* temp contains a constant address.
10790 	 On RISC machines where a constant address isn't valid,
10791 	 make some insns to get that address into a register.  */
10792       if (modifier != EXPAND_CONST_ADDRESS
10793 	  && modifier != EXPAND_INITIALIZER
10794 	  && modifier != EXPAND_SUM
10795 	  && ! memory_address_addr_space_p (mode, XEXP (temp, 0),
10796 					    MEM_ADDR_SPACE (temp)))
10797 	return replace_equiv_address (temp,
10798 				      copy_rtx (XEXP (temp, 0)));
10799       return temp;
10800 
10801     case POLY_INT_CST:
10802       return immed_wide_int_const (poly_int_cst_value (exp), mode);
10803 
10804     case SAVE_EXPR:
10805       {
10806 	tree val = treeop0;
10807 	rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl,
10808 				      inner_reference_p);
10809 
10810 	if (!SAVE_EXPR_RESOLVED_P (exp))
10811 	  {
10812 	    /* We can indeed still hit this case, typically via builtin
10813 	       expanders calling save_expr immediately before expanding
10814 	       something.  Assume this means that we only have to deal
10815 	       with non-BLKmode values.  */
10816 	    gcc_assert (GET_MODE (ret) != BLKmode);
10817 
10818 	    val = build_decl (curr_insn_location (),
10819 			      VAR_DECL, NULL, TREE_TYPE (exp));
10820 	    DECL_ARTIFICIAL (val) = 1;
10821 	    DECL_IGNORED_P (val) = 1;
10822 	    treeop0 = val;
10823 	    TREE_OPERAND (exp, 0) = treeop0;
10824 	    SAVE_EXPR_RESOLVED_P (exp) = 1;
10825 
10826 	    if (!CONSTANT_P (ret))
10827 	      ret = copy_to_reg (ret);
10828 	    SET_DECL_RTL (val, ret);
10829 	  }
10830 
10831         return ret;
10832       }
10833 
10834 
10835     case CONSTRUCTOR:
10836       /* If we don't need the result, just ensure we evaluate any
10837 	 subexpressions.  */
10838       if (ignore)
10839 	{
10840 	  unsigned HOST_WIDE_INT idx;
10841 	  tree value;
10842 
10843 	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
10844 	    expand_expr (value, const0_rtx, VOIDmode, EXPAND_NORMAL);
10845 
10846 	  return const0_rtx;
10847 	}
10848 
10849       return expand_constructor (exp, target, modifier, false);
10850 
10851     case TARGET_MEM_REF:
10852       {
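	/* A TARGET_MEM_REF describes a memory access in a shape that
	   matches one of the target's addressing modes, typically
	   BASE + STEP * INDEX + OFFSET as produced by the induction
	   variable optimizations; expansion just materializes that
	   address and wraps it in a MEM.  */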
10853 	addr_space_t as
10854 	  = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10855 	unsigned int align;
10856 
10857 	op0 = addr_for_mem_ref (exp, as, true);
10858 	op0 = memory_address_addr_space (mode, op0, as);
10859 	temp = gen_rtx_MEM (mode, op0);
10860 	set_mem_attributes (temp, exp, 0);
10861 	set_mem_addr_space (temp, as);
10862 	align = get_object_alignment (exp);
10863 	if (modifier != EXPAND_WRITE
10864 	    && modifier != EXPAND_MEMORY
10865 	    && mode != BLKmode
10866 	    && align < GET_MODE_ALIGNMENT (mode))
10867 	  temp = expand_misaligned_mem_ref (temp, mode, unsignedp,
10868 					    align, NULL_RTX, NULL);
10869 	return temp;
10870       }
10871 
10872     case MEM_REF:
10873       {
10874 	const bool reverse = REF_REVERSE_STORAGE_ORDER (exp);
10875 	addr_space_t as
10876 	  = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10877 	machine_mode address_mode;
10878 	tree base = TREE_OPERAND (exp, 0);
10879 	gimple *def_stmt;
10880 	unsigned align;
10881 	/* Handle expansion of non-aliased memory with non-BLKmode.  That
10882 	   might end up in a register.  */
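	/* E.g. a MEM_REF whose base is the address of a local scalar
	   that has been assigned a pseudo register rather than a stack
	   slot; such an access is rewritten below into a
	   VIEW_CONVERT_EXPR or BIT_FIELD_REF of the underlying decl, or
	   copied to a stack temporary when the access is BLKmode.  */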
10883 	if (mem_ref_refers_to_non_mem_p (exp))
10884 	  {
10885 	    poly_int64 offset = mem_ref_offset (exp).force_shwi ();
10886 	    base = TREE_OPERAND (base, 0);
10887 	    poly_uint64 type_size;
10888 	    if (known_eq (offset, 0)
10889 	        && !reverse
10890 		&& poly_int_tree_p (TYPE_SIZE (type), &type_size)
10891 		&& known_eq (GET_MODE_BITSIZE (DECL_MODE (base)), type_size))
10892 	      return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
10893 				  target, tmode, modifier);
10894 	    if (TYPE_MODE (type) == BLKmode)
10895 	      {
10896 		temp = assign_stack_temp (DECL_MODE (base),
10897 					  GET_MODE_SIZE (DECL_MODE (base)));
10898 		store_expr (base, temp, 0, false, false);
10899 		temp = adjust_address (temp, BLKmode, offset);
10900 		set_mem_size (temp, int_size_in_bytes (type));
10901 		return temp;
10902 	      }
10903 	    exp = build3 (BIT_FIELD_REF, type, base, TYPE_SIZE (type),
10904 			  bitsize_int (offset * BITS_PER_UNIT));
10905 	    REF_REVERSE_STORAGE_ORDER (exp) = reverse;
10906 	    return expand_expr (exp, target, tmode, modifier);
10907 	  }
10908 	address_mode = targetm.addr_space.address_mode (as);
10909 	if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
10910 	  {
10911 	    tree mask = gimple_assign_rhs2 (def_stmt);
10912 	    base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
10913 			   gimple_assign_rhs1 (def_stmt), mask);
10914 	    TREE_OPERAND (exp, 0) = base;
10915 	  }
10916 	align = get_object_alignment (exp);
10917 	op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
10918 	op0 = memory_address_addr_space (mode, op0, as);
10919 	if (!integer_zerop (TREE_OPERAND (exp, 1)))
10920 	  {
10921 	    rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode);
10922 	    op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
10923 	    op0 = memory_address_addr_space (mode, op0, as);
10924 	  }
10925 	temp = gen_rtx_MEM (mode, op0);
10926 	set_mem_attributes (temp, exp, 0);
10927 	set_mem_addr_space (temp, as);
10928 	if (TREE_THIS_VOLATILE (exp))
10929 	  MEM_VOLATILE_P (temp) = 1;
10930 	if (modifier != EXPAND_WRITE
10931 	    && modifier != EXPAND_MEMORY
10932 	    && !inner_reference_p
10933 	    && mode != BLKmode
10934 	    && align < GET_MODE_ALIGNMENT (mode))
10935 	  temp = expand_misaligned_mem_ref (temp, mode, unsignedp, align,
10936 					    modifier == EXPAND_STACK_PARM
10937 					    ? NULL_RTX : target, alt_rtl);
10938 	if (reverse
10939 	    && modifier != EXPAND_MEMORY
10940 	    && modifier != EXPAND_WRITE)
10941 	  temp = flip_storage_order (mode, temp);
10942 	return temp;
10943       }
10944 
10945     case ARRAY_REF:
10946 
10947       {
10948 	tree array = treeop0;
10949 	tree index = treeop1;
10950 	tree init;
10951 
10952 	/* Fold an expression like: "foo"[2].
10953 	   This is not done in fold so it won't happen inside &.
10954 	   Don't fold if this is for wide characters since it's too
10955 	   difficult to do correctly and this is a very rare case.  */
10956 
10957 	if (modifier != EXPAND_CONST_ADDRESS
10958 	    && modifier != EXPAND_INITIALIZER
10959 	    && modifier != EXPAND_MEMORY)
10960 	  {
10961 	    tree t = fold_read_from_constant_string (exp);
10962 
10963 	    if (t)
10964 	      return expand_expr (t, target, tmode, modifier);
10965 	  }
10966 
10967 	/* If this is a constant index into a constant array,
10968 	   just get the value from the array.  Handle both the cases when
10969 	   we have an explicit constructor and when our operand is a variable
10970 	   that was declared const.  */
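	/* E.g. given "static const int tbl[] = { 1, 2, 3 };", a use of
	   tbl[1] can be expanded directly to the constant 2 without
	   referencing the array in memory at all.  */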
10971 
10972 	if (modifier != EXPAND_CONST_ADDRESS
10973 	    && modifier != EXPAND_INITIALIZER
10974 	    && modifier != EXPAND_MEMORY
10975 	    && TREE_CODE (array) == CONSTRUCTOR
10976 	    && ! TREE_SIDE_EFFECTS (array)
10977 	    && TREE_CODE (index) == INTEGER_CST)
10978 	  {
10979 	    unsigned HOST_WIDE_INT ix;
10980 	    tree field, value;
10981 
10982 	    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array), ix,
10983 				      field, value)
10984 	      if (tree_int_cst_equal (field, index))
10985 		{
10986 		  if (!TREE_SIDE_EFFECTS (value))
10987 		    return expand_expr (fold (value), target, tmode, modifier);
10988 		  break;
10989 		}
10990 	  }
10991 
10992 	else if (optimize >= 1
10993 		 && modifier != EXPAND_CONST_ADDRESS
10994 		 && modifier != EXPAND_INITIALIZER
10995 		 && modifier != EXPAND_MEMORY
10996 		 && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
10997 		 && TREE_CODE (index) == INTEGER_CST
10998 		 && (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
10999 		 && (init = ctor_for_folding (array)) != error_mark_node)
11000 	  {
11001 	    if (init == NULL_TREE)
11002 	      {
11003 		tree value = build_zero_cst (type);
11004 		if (TREE_CODE (value) == CONSTRUCTOR)
11005 		  {
11006 		    /* If VALUE is a CONSTRUCTOR, this optimization is only
11007 		       useful if this doesn't store the CONSTRUCTOR into
11008 		       memory.  If it does, it is more efficient to just
11009 		       load the data from the array directly.  */
11010 		    rtx ret = expand_constructor (value, target,
11011 						  modifier, true);
11012 		    if (ret == NULL_RTX)
11013 		      value = NULL_TREE;
11014 		  }
11015 
11016 		if (value)
11017 		  return expand_expr (value, target, tmode, modifier);
11018 	      }
11019 	    else if (TREE_CODE (init) == CONSTRUCTOR)
11020 	      {
11021 		unsigned HOST_WIDE_INT ix;
11022 		tree field, value;
11023 
11024 		FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
11025 					  field, value)
11026 		  if (tree_int_cst_equal (field, index))
11027 		    {
11028 		      if (TREE_SIDE_EFFECTS (value))
11029 			break;
11030 
11031 		      if (TREE_CODE (value) == CONSTRUCTOR)
11032 			{
11033 			  /* If VALUE is a CONSTRUCTOR, this
11034 			     optimization is only useful if
11035 			     this doesn't store the CONSTRUCTOR
11036 			     into memory.  If it does, it is more
11037 			     efficient to just load the data from
11038 			     the array directly.  */
11039 			  rtx ret = expand_constructor (value, target,
11040 							modifier, true);
11041 			  if (ret == NULL_RTX)
11042 			    break;
11043 			}
11044 
11045 		      return
11046 		        expand_expr (fold (value), target, tmode, modifier);
11047 		    }
11048 	      }
11049 	    else if (TREE_CODE (init) == STRING_CST)
11050 	      {
11051 		tree low_bound = array_ref_low_bound (exp);
11052 		tree index1 = fold_convert_loc (loc, sizetype, treeop1);
11053 
11054 		/* Optimize the special case of a zero lower bound.
11055 
11056 		   We convert the lower bound to sizetype to avoid problems
11057 		   with constant folding.  E.g. suppose the lower bound is
11058 		   1 and its mode is QI.  Without the conversion
11059 		      (ARRAY + (INDEX - (unsigned char)1))
11060 		   becomes
11061 		      (ARRAY + (-(unsigned char)1) + INDEX)
11062 		   which becomes
11063 		      (ARRAY + 255 + INDEX).  Oops!  */
11064 		if (!integer_zerop (low_bound))
11065 		  index1 = size_diffop_loc (loc, index1,
11066 					    fold_convert_loc (loc, sizetype,
11067 							      low_bound));
11068 
11069 		if (tree_fits_uhwi_p (index1)
11070 		    && compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0)
11071 		  {
11072 		    tree char_type = TREE_TYPE (TREE_TYPE (init));
11073 		    scalar_int_mode char_mode;
11074 
11075 		    if (is_int_mode (TYPE_MODE (char_type), &char_mode)
11076 			&& GET_MODE_SIZE (char_mode) == 1)
11077 		      return gen_int_mode (TREE_STRING_POINTER (init)
11078 					   [TREE_INT_CST_LOW (index1)],
11079 					   char_mode);
11080 		  }
11081 	      }
11082 	  }
11083       }
11084       goto normal_inner_ref;
11085 
11086     case COMPONENT_REF:
11087       gcc_assert (TREE_CODE (treeop0) != CONSTRUCTOR);
11088       /* Fall through.  */
11089     case BIT_FIELD_REF:
11090     case ARRAY_RANGE_REF:
11091     normal_inner_ref:
11092       {
11093 	machine_mode mode1, mode2;
11094 	poly_int64 bitsize, bitpos, bytepos;
11095 	tree offset;
11096 	int reversep, volatilep = 0, must_force_mem;
11097 	tree tem
11098 	  = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
11099 				 &unsignedp, &reversep, &volatilep);
11100 	rtx orig_op0, memloc;
11101 	bool clear_mem_expr = false;
11102 
11103 	/* If we got back the original object, something is wrong.  Perhaps
11104 	   we are evaluating an expression too early.  In any event, don't
11105 	   infinitely recurse.  */
11106 	gcc_assert (tem != exp);
11107 
11108 	/* If TEM's type is a union of variable size, pass TARGET to the inner
11109 	   computation, since it will need a temporary and TARGET is known
11110 	   to be suitable.  This occurs in unchecked conversion in Ada.  */
11111 	orig_op0 = op0
11112 	  = expand_expr_real (tem,
11113 			      (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
11114 			       && COMPLETE_TYPE_P (TREE_TYPE (tem))
11115 			       && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
11116 				   != INTEGER_CST)
11117 			       && modifier != EXPAND_STACK_PARM
11118 			       ? target : NULL_RTX),
11119 			      VOIDmode,
11120 			      modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
11121 			      NULL, true);
11122 
11123 	/* If the field has a mode, we want to access it in the
11124 	   field's mode, not the computed mode.
11125 	   If a MEM has VOIDmode (external with incomplete type),
11126 	   use BLKmode for it instead.  */
11127 	if (MEM_P (op0))
11128 	  {
11129 	    if (mode1 != VOIDmode)
11130 	      op0 = adjust_address (op0, mode1, 0);
11131 	    else if (GET_MODE (op0) == VOIDmode)
11132 	      op0 = adjust_address (op0, BLKmode, 0);
11133 	  }
11134 
11135 	mode2
11136 	  = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
11137 
11138 	/* Make sure bitpos is not negative, it can wreak havoc later.  */
11139 	if (maybe_lt (bitpos, 0))
11140 	  {
11141 	    gcc_checking_assert (offset == NULL_TREE);
11142 	    offset = size_int (bits_to_bytes_round_down (bitpos));
11143 	    bitpos = num_trailing_bits (bitpos);
11144 	  }
11145 
11146 	/* If we have either an offset, a BLKmode result, or a reference
11147 	   outside the underlying object, we must force it to memory.
11148 	   Such a case can occur in Ada if we have unchecked conversion
11149 	   of an expression from a scalar type to an aggregate type or
11150 	   for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
11151 	   passed a partially uninitialized object or a view-conversion
11152 	   to a larger size.  */
11153 	must_force_mem = (offset
11154 			  || mode1 == BLKmode
11155 			  || (mode == BLKmode
11156 			      && !int_mode_for_size (bitsize, 1).exists ())
11157 			  || maybe_gt (bitpos + bitsize,
11158 				       GET_MODE_BITSIZE (mode2)));
11159 
11160 	/* Handle CONCAT first.  */
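	/* OP0 is a CONCAT when the containing object is a complex value
	   kept as a pair of values, e.g. when taking __real__ or
	   __imag__ of a _Complex double that never lives in memory; the
	   cases below pick out the requested half directly instead of
	   spilling the whole value to the stack.  */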
11161 	if (GET_CODE (op0) == CONCAT && !must_force_mem)
11162 	  {
11163 	    if (known_eq (bitpos, 0)
11164 		&& known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0)))
11165 		&& COMPLEX_MODE_P (mode1)
11166 		&& COMPLEX_MODE_P (GET_MODE (op0))
11167 		&& (GET_MODE_PRECISION (GET_MODE_INNER (mode1))
11168 		    == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0)))))
11169 	      {
11170 		if (reversep)
11171 		  op0 = flip_storage_order (GET_MODE (op0), op0);
11172 		if (mode1 != GET_MODE (op0))
11173 		  {
11174 		    rtx parts[2];
11175 		    for (int i = 0; i < 2; i++)
11176 		      {
11177 			rtx op = read_complex_part (op0, i != 0);
11178 			if (GET_CODE (op) == SUBREG)
11179 			  op = force_reg (GET_MODE (op), op);
11180 			temp = gen_lowpart_common (GET_MODE_INNER (mode1), op);
11181 			if (temp)
11182 			  op = temp;
11183 			else
11184 			  {
11185 			    if (!REG_P (op) && !MEM_P (op))
11186 			      op = force_reg (GET_MODE (op), op);
11187 			    op = gen_lowpart (GET_MODE_INNER (mode1), op);
11188 			  }
11189 			parts[i] = op;
11190 		      }
11191 		    op0 = gen_rtx_CONCAT (mode1, parts[0], parts[1]);
11192 		  }
11193 		return op0;
11194 	      }
11195 	    if (known_eq (bitpos, 0)
11196 		&& known_eq (bitsize,
11197 			     GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
11198 		&& maybe_ne (bitsize, 0))
11199 	      {
11200 		op0 = XEXP (op0, 0);
11201 		mode2 = GET_MODE (op0);
11202 	      }
11203 	    else if (known_eq (bitpos,
11204 			       GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
11205 		     && known_eq (bitsize,
11206 				  GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1))))
11207 		     && maybe_ne (bitpos, 0)
11208 		     && maybe_ne (bitsize, 0))
11209 	      {
11210 		op0 = XEXP (op0, 1);
11211 		bitpos = 0;
11212 		mode2 = GET_MODE (op0);
11213 	      }
11214 	    else
11215 	      /* Otherwise force into memory.  */
11216 	      must_force_mem = 1;
11217 	  }
11218 
11219 	/* If this is a constant, put it in a register if it is a legitimate
11220 	   constant and we don't need a memory reference.  */
11221 	if (CONSTANT_P (op0)
11222 	    && mode2 != BLKmode
11223 	    && targetm.legitimate_constant_p (mode2, op0)
11224 	    && !must_force_mem)
11225 	  op0 = force_reg (mode2, op0);
11226 
11227 	/* Otherwise, if this is a constant, try to force it to the constant
11228 	   pool.  Note that back-ends, e.g. MIPS, may refuse to do so if it
11229 	   is a legitimate constant.  */
11230 	else if (CONSTANT_P (op0) && (memloc = force_const_mem (mode2, op0)))
11231 	  op0 = validize_mem (memloc);
11232 
11233 	/* Otherwise, if this is a constant, or if the object is not in
11234 	   memory but needs to be, put it there.  */
11235 	else if (CONSTANT_P (op0) || (!MEM_P (op0) && must_force_mem))
11236 	  {
11237 	    memloc = assign_temp (TREE_TYPE (tem), 1, 1);
11238 	    emit_move_insn (memloc, op0);
11239 	    op0 = memloc;
11240 	    clear_mem_expr = true;
11241 	  }
11242 
11243 	if (offset)
11244 	  {
11245 	    machine_mode address_mode;
11246 	    rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
11247 					  EXPAND_SUM);
11248 
11249 	    gcc_assert (MEM_P (op0));
11250 
11251 	    address_mode = get_address_mode (op0);
11252 	    if (GET_MODE (offset_rtx) != address_mode)
11253 	      {
11254 		/* We cannot be sure that the RTL in offset_rtx is valid outside
11255 		   of a memory address context, so force it into a register
11256 		   before attempting to convert it to the desired mode.  */
11257 		offset_rtx = force_operand (offset_rtx, NULL_RTX);
11258 		offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
11259 	      }
11260 
11261 	    /* See the comment in expand_assignment for the rationale.  */
11262 	    if (mode1 != VOIDmode
11263 		&& maybe_ne (bitpos, 0)
11264 		&& maybe_gt (bitsize, 0)
11265 		&& multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
11266 		&& multiple_p (bitpos, bitsize)
11267 		&& multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
11268 		&& MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode1))
11269 	      {
11270 		op0 = adjust_address (op0, mode1, bytepos);
11271 		bitpos = 0;
11272 	      }
11273 
11274 	    op0 = offset_address (op0, offset_rtx,
11275 				  highest_pow2_factor (offset));
11276 	  }
11277 
11278 	/* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
11279 	   record its alignment as BIGGEST_ALIGNMENT.  */
11280 	if (MEM_P (op0)
11281 	    && known_eq (bitpos, 0)
11282 	    && offset != 0
11283 	    && is_aligning_offset (offset, tem))
11284 	  set_mem_align (op0, BIGGEST_ALIGNMENT);
11285 
11286 	/* Don't forget about volatility even if this is a bitfield.  */
11287 	if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
11288 	  {
11289 	    if (op0 == orig_op0)
11290 	      op0 = copy_rtx (op0);
11291 
11292 	    MEM_VOLATILE_P (op0) = 1;
11293 	  }
11294 
11295 	if (MEM_P (op0) && TREE_CODE (tem) == FUNCTION_DECL)
11296 	  {
11297 	    if (op0 == orig_op0)
11298 	      op0 = copy_rtx (op0);
11299 
11300 	    set_mem_align (op0, BITS_PER_UNIT);
11301 	  }
11302 
11303 	/* In cases where an aligned union has an unaligned object
11304 	   as a field, we might be extracting a BLKmode value from
11305 	   an integer-mode (e.g., SImode) object.  Handle this case
11306 	   by doing the extract into an object as wide as the field
11307 	   (which we know to be the width of a basic mode), then
11308 	   storing into memory, and changing the mode to BLKmode.  */
11309 	if (mode1 == VOIDmode
11310 	    || REG_P (op0) || GET_CODE (op0) == SUBREG
11311 	    || (mode1 != BLKmode && ! direct_load[(int) mode1]
11312 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
11313 		&& GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
11314 		&& modifier != EXPAND_CONST_ADDRESS
11315 		&& modifier != EXPAND_INITIALIZER
11316 		&& modifier != EXPAND_MEMORY)
11317 	    /* If the bitfield is volatile and the bitsize
11318 	       is narrower than the access size of the bitfield,
11319 	       we need to extract bitfields from the access.  */
11320 	    || (volatilep && TREE_CODE (exp) == COMPONENT_REF
11321 		&& DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
11322 		&& mode1 != BLKmode
11323 		&& maybe_lt (bitsize, GET_MODE_SIZE (mode1) * BITS_PER_UNIT))
11324 	    /* If the field isn't aligned enough to fetch as a memref,
11325 	       fetch it as a bit field.  */
11326 	    || (mode1 != BLKmode
11327 		&& (((MEM_P (op0)
11328 		      ? MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
11329 			|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode1))
11330 		      : TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
11331 			|| !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
11332 		     && modifier != EXPAND_MEMORY
11333 		     && ((modifier == EXPAND_CONST_ADDRESS
11334 			  || modifier == EXPAND_INITIALIZER)
11335 			 ? STRICT_ALIGNMENT
11336 			 : targetm.slow_unaligned_access (mode1,
11337 							  MEM_ALIGN (op0))))
11338 		    || !multiple_p (bitpos, BITS_PER_UNIT)))
11339 	    /* If the type and the field are a constant size and the
11340 	       size of the type isn't the same size as the bitfield,
11341 	       we must use bitfield operations.  */
11342 	    || (known_size_p (bitsize)
11343 		&& TYPE_SIZE (TREE_TYPE (exp))
11344 		&& poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
11345 		&& maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
11346 			     bitsize)))
11347 	  {
11348 	    machine_mode ext_mode = mode;
11349 
11350 	    if (ext_mode == BLKmode
11351 		&& ! (target != 0 && MEM_P (op0)
11352 		      && MEM_P (target)
11353 		      && multiple_p (bitpos, BITS_PER_UNIT)))
11354 	      ext_mode = int_mode_for_size (bitsize, 1).else_blk ();
11355 
11356 	    if (ext_mode == BLKmode)
11357 	      {
11358 		if (target == 0)
11359 		  target = assign_temp (type, 1, 1);
11360 
11361 		/* ??? Unlike the similar test a few lines below, this one is
11362 		   very likely obsolete.  */
11363 		if (known_eq (bitsize, 0))
11364 		  return target;
11365 
11366 		/* In this case, BITPOS must start at a byte boundary and
11367 		   TARGET, if specified, must be a MEM.  */
11368 		gcc_assert (MEM_P (op0)
11369 			    && (!target || MEM_P (target)));
11370 
11371 		bytepos = exact_div (bitpos, BITS_PER_UNIT);
11372 		poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
11373 		emit_block_move (target,
11374 				 adjust_address (op0, VOIDmode, bytepos),
11375 				 gen_int_mode (bytesize, Pmode),
11376 				 (modifier == EXPAND_STACK_PARM
11377 				  ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
11378 
11379 		return target;
11380 	      }
11381 
11382 	    /* If we have nothing to extract, the result will be 0 for targets
11383 	       with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise.  Always
11384 	       return 0 for the sake of consistency, as reading a zero-sized
11385 	       bitfield is valid in Ada and the value is fully specified.  */
11386 	    if (known_eq (bitsize, 0))
11387 	      return const0_rtx;
11388 
11389 	    op0 = validize_mem (op0);
11390 
11391 	    if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
11392 	      mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11393 
11394 	    /* If the result has aggregate type and the extraction is done in
11395 	       an integral mode, then the field may not be aligned on a byte
11396 	       boundary; in this case, if it has reverse storage order, it
11397 	       needs to be extracted as a scalar field with reverse storage
11398 	       order and put back into memory order afterwards.  */
11399 	    if (AGGREGATE_TYPE_P (type)
11400 		&& GET_MODE_CLASS (ext_mode) == MODE_INT)
11401 	      reversep = TYPE_REVERSE_STORAGE_ORDER (type);
11402 
11403 	    gcc_checking_assert (known_ge (bitpos, 0));
11404 	    op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
11405 				     (modifier == EXPAND_STACK_PARM
11406 				      ? NULL_RTX : target),
11407 				     ext_mode, ext_mode, reversep, alt_rtl);
11408 
11409 	    /* If the result has aggregate type and the mode of OP0 is an
11410 	       integral mode then, if BITSIZE is narrower than this mode
11411 	       and this is for big-endian data, we must put the field
11412 	       into the high-order bits.  And we must also put it back
11413 	       into memory order if it has been previously reversed.  */
11414 	    scalar_int_mode op0_mode;
11415 	    if (AGGREGATE_TYPE_P (type)
11416 		&& is_int_mode (GET_MODE (op0), &op0_mode))
11417 	      {
11418 		HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode);
11419 
11420 		gcc_checking_assert (known_le (bitsize, size));
11421 		if (maybe_lt (bitsize, size)
11422 		    && reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
11423 		  op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0,
11424 				      size - bitsize, op0, 1);
11425 
11426 		if (reversep)
11427 		  op0 = flip_storage_order (op0_mode, op0);
11428 	      }
11429 
11430 	    /* If the result type is BLKmode, store the data into a temporary
11431 	       of the appropriate type, but with the mode corresponding to the
11432 	       mode for the data we have (op0's mode).  */
11433 	    if (mode == BLKmode)
11434 	      {
11435 		rtx new_rtx
11436 		  = assign_stack_temp_for_type (ext_mode,
11437 						GET_MODE_BITSIZE (ext_mode),
11438 						type);
11439 		emit_move_insn (new_rtx, op0);
11440 		op0 = copy_rtx (new_rtx);
11441 		PUT_MODE (op0, BLKmode);
11442 	      }
11443 
11444 	    return op0;
11445 	  }
11446 
11447 	/* If the result is BLKmode, use that to access the object
11448 	   now as well.  */
11449 	if (mode == BLKmode)
11450 	  mode1 = BLKmode;
11451 
11452 	/* Get a reference to just this component.  */
11453 	bytepos = bits_to_bytes_round_down (bitpos);
11454 	if (modifier == EXPAND_CONST_ADDRESS
11455 	    || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
11456 	  op0 = adjust_address_nv (op0, mode1, bytepos);
11457 	else
11458 	  op0 = adjust_address (op0, mode1, bytepos);
11459 
11460 	if (op0 == orig_op0)
11461 	  op0 = copy_rtx (op0);
11462 
11463 	/* Don't set memory attributes if the base expression is
11464 	   SSA_NAME that got expanded as a MEM or a CONSTANT.  In that case,
11465 	   we should just honor its original memory attributes.  */
11466 	if (!(TREE_CODE (tem) == SSA_NAME
11467 	      && (MEM_P (orig_op0) || CONSTANT_P (orig_op0))))
11468 	  set_mem_attributes (op0, exp, 0);
11469 
11470 	if (REG_P (XEXP (op0, 0)))
11471 	  mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11472 
11473 	/* If op0 is a temporary because the original expression was forced
11474 	   to memory, clear MEM_EXPR so that the original expression cannot
11475 	   be marked as addressable through MEM_EXPR of the temporary.  */
11476 	if (clear_mem_expr)
11477 	  set_mem_expr (op0, NULL_TREE);
11478 
11479 	MEM_VOLATILE_P (op0) |= volatilep;
11480 
11481         if (reversep
11482 	    && modifier != EXPAND_MEMORY
11483 	    && modifier != EXPAND_WRITE)
11484 	  op0 = flip_storage_order (mode1, op0);
11485 
11486 	if (mode == mode1 || mode1 == BLKmode || mode1 == tmode
11487 	    || modifier == EXPAND_CONST_ADDRESS
11488 	    || modifier == EXPAND_INITIALIZER)
11489 	  return op0;
11490 
11491 	if (target == 0)
11492 	  target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
11493 
11494 	convert_move (target, op0, unsignedp);
11495 	return target;
11496       }
11497 
11498     case OBJ_TYPE_REF:
11499       return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier);
11500 
11501     case CALL_EXPR:
11502       /* All valid uses of __builtin_va_arg_pack () are removed during
11503 	 inlining.  */
11504       if (CALL_EXPR_VA_ARG_PACK (exp))
11505 	error ("invalid use of %<__builtin_va_arg_pack ()%>");
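      /* Diagnose calls to functions declared with attribute error or
	 warning, e.g.
	     __attribute__ ((error ("do not call"))) void f (void);
	 This is done here, at expansion time, so that calls already
	 removed by earlier optimizations are not diagnosed.  */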
11506       {
11507 	tree fndecl = get_callee_fndecl (exp), attr;
11508 
11509 	if (fndecl
11510 	    /* Don't diagnose the error attribute in thunks, those are
11511 	       artificially created.  */
11512 	    && !CALL_FROM_THUNK_P (exp)
11513 	    && (attr = lookup_attribute ("error",
11514 					 DECL_ATTRIBUTES (fndecl))) != NULL)
11515 	  {
11516 	    const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11517 	    error ("call to %qs declared with attribute error: %s",
11518 		   identifier_to_locale (ident),
11519 		   TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11520 	  }
11521 	if (fndecl
11522 	    /* Don't diagnose the warning attribute in thunks, those are
11523 	       artificially created.  */
11524 	    && !CALL_FROM_THUNK_P (exp)
11525 	    && (attr = lookup_attribute ("warning",
11526 					 DECL_ATTRIBUTES (fndecl))) != NULL)
11527 	  {
11528 	    const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11529 	    warning_at (EXPR_LOCATION (exp),
11530 			OPT_Wattribute_warning,
11531 			"call to %qs declared with attribute warning: %s",
11532 			identifier_to_locale (ident),
11533 			TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11534 	  }
11535 
11536 	/* Check for a built-in function.  */
11537 	if (fndecl && fndecl_built_in_p (fndecl))
11538 	  {
11539 	    gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
11540 	    return expand_builtin (exp, target, subtarget, tmode, ignore);
11541 	  }
11542       }
11543       return expand_call (exp, target, ignore);
11544 
11545     case VIEW_CONVERT_EXPR:
11546       op0 = NULL_RTX;
11547 
11548       /* If we are converting to BLKmode, try to avoid an intermediate
11549 	 temporary by fetching an inner memory reference.  */
11550       if (mode == BLKmode
11551 	  && poly_int_tree_p (TYPE_SIZE (type))
11552 	  && TYPE_MODE (TREE_TYPE (treeop0)) != BLKmode
11553 	  && handled_component_p (treeop0))
11554       {
11555 	machine_mode mode1;
11556 	poly_int64 bitsize, bitpos, bytepos;
11557 	tree offset;
11558 	int reversep, volatilep = 0;
11559 	tree tem
11560 	  = get_inner_reference (treeop0, &bitsize, &bitpos, &offset, &mode1,
11561 				 &unsignedp, &reversep, &volatilep);
11562 
11563 	/* ??? We should work harder and deal with non-zero offsets.  */
11564 	if (!offset
11565 	    && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
11566 	    && !reversep
11567 	    && known_size_p (bitsize)
11568 	    && known_eq (wi::to_poly_offset (TYPE_SIZE (type)), bitsize))
11569 	  {
11570 	    /* See the normal_inner_ref case for the rationale.  */
11571 	    rtx orig_op0
11572 	      = expand_expr_real (tem,
11573 				  (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
11574 				   && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
11575 				       != INTEGER_CST)
11576 				   && modifier != EXPAND_STACK_PARM
11577 				   ? target : NULL_RTX),
11578 				  VOIDmode,
11579 				  modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
11580 				  NULL, true);
11581 
11582 	    if (MEM_P (orig_op0))
11583 	      {
11584 		op0 = orig_op0;
11585 
11586 		/* Get a reference to just this component.  */
11587 		if (modifier == EXPAND_CONST_ADDRESS
11588 		    || modifier == EXPAND_SUM
11589 		    || modifier == EXPAND_INITIALIZER)
11590 		  op0 = adjust_address_nv (op0, mode, bytepos);
11591 		else
11592 		  op0 = adjust_address (op0, mode, bytepos);
11593 
11594 		if (op0 == orig_op0)
11595 		  op0 = copy_rtx (op0);
11596 
11597 		set_mem_attributes (op0, treeop0, 0);
11598 		if (REG_P (XEXP (op0, 0)))
11599 		  mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11600 
11601 		MEM_VOLATILE_P (op0) |= volatilep;
11602 	      }
11603 	  }
11604       }
11605 
11606       if (!op0)
11607 	op0 = expand_expr_real (treeop0, NULL_RTX, VOIDmode, modifier,
11608 				NULL, inner_reference_p);
11609 
11610       /* If the input and output modes are both the same, we are done.  */
11611       if (mode == GET_MODE (op0))
11612 	;
11613       /* If neither mode is BLKmode, and both modes are the same size
11614 	 then we can use gen_lowpart.  */
11615       else if (mode != BLKmode
11616 	       && GET_MODE (op0) != BLKmode
11617 	       && known_eq (GET_MODE_PRECISION (mode),
11618 			    GET_MODE_PRECISION (GET_MODE (op0)))
11619 	       && !COMPLEX_MODE_P (GET_MODE (op0)))
11620 	{
11621 	  if (GET_CODE (op0) == SUBREG)
11622 	    op0 = force_reg (GET_MODE (op0), op0);
11623 	  temp = gen_lowpart_common (mode, op0);
11624 	  if (temp)
11625 	    op0 = temp;
11626 	  else
11627 	    {
11628 	      if (!REG_P (op0) && !MEM_P (op0))
11629 		op0 = force_reg (GET_MODE (op0), op0);
11630 	      op0 = gen_lowpart (mode, op0);
11631 	    }
11632 	}
11633       /* If both types are integral, convert from one mode to the other.  */
11634       else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
11635 	op0 = convert_modes (mode, GET_MODE (op0), op0,
11636 			     TYPE_UNSIGNED (TREE_TYPE (treeop0)));
11637       /* If the output type is a bit-field type, do an extraction.  */
11638       else if (reduce_bit_field)
11639 	return extract_bit_field (op0, TYPE_PRECISION (type), 0,
11640 				  TYPE_UNSIGNED (type), NULL_RTX,
11641 				  mode, mode, false, NULL);
11642       /* As a last resort, spill op0 to memory, and reload it in a
11643 	 different mode.  */
11644       else if (!MEM_P (op0))
11645 	{
11646 	  /* If the operand is not a MEM, force it into memory.  Since we
11647 	     are going to be changing the mode of the MEM, don't call
11648 	     force_const_mem for constants because we don't allow pool
11649 	     constants to change mode.  */
11650 	  tree inner_type = TREE_TYPE (treeop0);
11651 
11652 	  gcc_assert (!TREE_ADDRESSABLE (exp));
11653 
11654 	  if (target == 0 || GET_MODE (target) != TYPE_MODE (inner_type))
11655 	    target
11656 	      = assign_stack_temp_for_type
11657 		(TYPE_MODE (inner_type),
11658 		 GET_MODE_SIZE (TYPE_MODE (inner_type)), inner_type);
11659 
11660 	  emit_move_insn (target, op0);
11661 	  op0 = target;
11662 	}
11663 
11664       /* If OP0 is (now) a MEM, we need to deal with alignment issues.  If the
11665 	 output type is such that the operand is known to be aligned, indicate
11666 	 that it is.  Otherwise, we need only be concerned about alignment for
11667 	 non-BLKmode results.  */
11668       if (MEM_P (op0))
11669 	{
11670 	  enum insn_code icode;
11671 
11672 	  if (modifier != EXPAND_WRITE
11673 	      && modifier != EXPAND_MEMORY
11674 	      && !inner_reference_p
11675 	      && mode != BLKmode
11676 	      && MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode))
11677 	    {
11678 	      /* If the target has special handling for unaligned loads
11679 		 of this mode, use it.  */
11680 	      if ((icode = optab_handler (movmisalign_optab, mode))
11681 		  != CODE_FOR_nothing)
11682 		{
11683 		  rtx reg;
11684 
11685 		  op0 = adjust_address (op0, mode, 0);
11686 		  /* We've already validated the memory, and we're creating a
11687 		     new pseudo destination.  The predicates really can't
11688 		     fail.  */
11689 		  reg = gen_reg_rtx (mode);
11690 
11691 		  /* Nor can the insn generator.  */
11692 		  rtx_insn *insn = GEN_FCN (icode) (reg, op0);
11693 		  emit_insn (insn);
11694 		  return reg;
11695 		}
11696 	      else if (STRICT_ALIGNMENT)
11697 		{
11698 		  poly_uint64 mode_size = GET_MODE_SIZE (mode);
11699 		  poly_uint64 temp_size = mode_size;
11700 		  if (GET_MODE (op0) != BLKmode)
11701 		    temp_size = upper_bound (temp_size,
11702 					     GET_MODE_SIZE (GET_MODE (op0)));
11703 		  rtx new_rtx
11704 		    = assign_stack_temp_for_type (mode, temp_size, type);
11705 		  rtx new_with_op0_mode
11706 		    = adjust_address (new_rtx, GET_MODE (op0), 0);
11707 
11708 		  gcc_assert (!TREE_ADDRESSABLE (exp));
11709 
11710 		  if (GET_MODE (op0) == BLKmode)
11711 		    {
11712 		      rtx size_rtx = gen_int_mode (mode_size, Pmode);
11713 		      emit_block_move (new_with_op0_mode, op0, size_rtx,
11714 				       (modifier == EXPAND_STACK_PARM
11715 					? BLOCK_OP_CALL_PARM
11716 					: BLOCK_OP_NORMAL));
11717 		    }
11718 		  else
11719 		    emit_move_insn (new_with_op0_mode, op0);
11720 
11721 		  op0 = new_rtx;
11722 		}
11723 	    }
11724 
11725 	  op0 = adjust_address (op0, mode, 0);
11726 	}
11727 
11728       return op0;
11729 
11730     case MODIFY_EXPR:
11731       {
11732 	tree lhs = treeop0;
11733 	tree rhs = treeop1;
11734 	gcc_assert (ignore);
11735 
11736 	/* Check for |= or &= of a bitfield of size one into another bitfield
11737 	   of size 1.  In this case, (unless we need the result of the
11738 	   assignment) we can do this more efficiently with a
11739 	   test followed by an assignment, if necessary.
11740 
11741 	   ??? At this point, we can't get a BIT_FIELD_REF here.  But if
11742 	   things change so we do, this code should be enhanced to
11743 	   support it.  */
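	/* E.g. for one-bit bitfields a and b in the same struct,
	   "s.a |= s.b" is expanded as "if (s.b) s.a = 1;" and
	   "s.a &= s.b" as "if (!s.b) s.a = 0;", so the destination
	   bitfield need not be read at all.  */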
11744 	if (TREE_CODE (lhs) == COMPONENT_REF
11745 	    && (TREE_CODE (rhs) == BIT_IOR_EXPR
11746 		|| TREE_CODE (rhs) == BIT_AND_EXPR)
11747 	    && TREE_OPERAND (rhs, 0) == lhs
11748 	    && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
11749 	    && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1)))
11750 	    && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))))
11751 	  {
11752 	    rtx_code_label *label = gen_label_rtx ();
11753 	    int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
11754 	    profile_probability prob = profile_probability::uninitialized ();
11755  	    if (value)
11756  	      jumpifnot (TREE_OPERAND (rhs, 1), label, prob);
11757  	    else
11758  	      jumpif (TREE_OPERAND (rhs, 1), label, prob);
11759 	    expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
11760 			       false);
11761 	    do_pending_stack_adjust ();
11762 	    emit_label (label);
11763 	    return const0_rtx;
11764 	  }
11765 
11766 	expand_assignment (lhs, rhs, false);
11767 	return const0_rtx;
11768       }
11769 
11770     case ADDR_EXPR:
11771       return expand_expr_addr_expr (exp, target, tmode, modifier);
11772 
11773     case REALPART_EXPR:
11774       op0 = expand_normal (treeop0);
11775       return read_complex_part (op0, false);
11776 
11777     case IMAGPART_EXPR:
11778       op0 = expand_normal (treeop0);
11779       return read_complex_part (op0, true);
11780 
11781     case RETURN_EXPR:
11782     case LABEL_EXPR:
11783     case GOTO_EXPR:
11784     case SWITCH_EXPR:
11785     case ASM_EXPR:
11786       /* Expanded in cfgexpand.cc.  */
11787       gcc_unreachable ();
11788 
11789     case TRY_CATCH_EXPR:
11790     case CATCH_EXPR:
11791     case EH_FILTER_EXPR:
11792     case TRY_FINALLY_EXPR:
11793     case EH_ELSE_EXPR:
11794       /* Lowered by tree-eh.cc.  */
11795       gcc_unreachable ();
11796 
11797     case WITH_CLEANUP_EXPR:
11798     case CLEANUP_POINT_EXPR:
11799     case TARGET_EXPR:
11800     case CASE_LABEL_EXPR:
11801     case VA_ARG_EXPR:
11802     case BIND_EXPR:
11803     case INIT_EXPR:
11804     case CONJ_EXPR:
11805     case COMPOUND_EXPR:
11806     case PREINCREMENT_EXPR:
11807     case PREDECREMENT_EXPR:
11808     case POSTINCREMENT_EXPR:
11809     case POSTDECREMENT_EXPR:
11810     case LOOP_EXPR:
11811     case EXIT_EXPR:
11812     case COMPOUND_LITERAL_EXPR:
11813       /* Lowered by gimplify.cc.  */
11814       gcc_unreachable ();
11815 
11816     case FDESC_EXPR:
11817       /* Function descriptors are not valid except for as
11818 	 initialization constants, and should not be expanded.  */
11819       gcc_unreachable ();
11820 
11821     case WITH_SIZE_EXPR:
11822       /* WITH_SIZE_EXPR expands to its first argument.  The caller should
11823 	 have pulled out the size to use in whatever context it needed.  */
11824       return expand_expr_real (treeop0, original_target, tmode,
11825 			       modifier, alt_rtl, inner_reference_p);
11826 
11827     default:
11828       return expand_expr_real_2 (&ops, target, tmode, modifier);
11829     }
11830 }
11831 
11832 /* Subroutine of above: reduce EXP to the precision of TYPE (in the
11833    signedness of TYPE), possibly returning the result in TARGET.
11834    TYPE is known to be a partial integer type.  */
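/* For instance, reducing to an unsigned type of precision 3 held in an
   8-bit mode masks the value with 0x7, while reducing to a signed type
   of precision 3 shifts the value left and then arithmetically right
   by 5 bits so that the result is correctly sign-extended.  */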
11835 static rtx
11836 reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
11837 {
11838   scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
11839   HOST_WIDE_INT prec = TYPE_PRECISION (type);
11840   gcc_assert ((GET_MODE (exp) == VOIDmode || GET_MODE (exp) == mode)
11841 	      && (!target || GET_MODE (target) == mode));
11842 
11843   /* For constant values, reduce using wide_int_to_tree. */
11844   if (poly_int_rtx_p (exp))
11845     {
11846       auto value = wi::to_poly_wide (exp, mode);
11847       tree t = wide_int_to_tree (type, value);
11848       return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
11849     }
11850   else if (TYPE_UNSIGNED (type))
11851     {
11852       rtx mask = immed_wide_int_const
11853 	(wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
11854       return expand_and (mode, exp, mask, target);
11855     }
11856   else
11857     {
11858       int count = GET_MODE_PRECISION (mode) - prec;
11859       exp = expand_shift (LSHIFT_EXPR, mode, exp, count, target, 0);
11860       return expand_shift (RSHIFT_EXPR, mode, exp, count, target, 0);
11861     }
11862 }
11863 
11864 /* Subroutine of above: returns 1 if OFFSET corresponds to an offset that
11865    when applied to the address of EXP produces an address known to be
11866    aligned more than BIGGEST_ALIGNMENT.  */
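/* Such an offset typically has the form ((-(T) &EXP) & (N - 1)) for some
   integer type T and a power of 2 N larger than BIGGEST_ALIGNMENT;
   adding it to the address of EXP rounds that address up to an N-byte
   boundary, so the sum is known to be at least N-aligned.  */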
11867 
11868 static int
11869 is_aligning_offset (const_tree offset, const_tree exp)
11870 {
11871   /* Strip off any conversions.  */
11872   while (CONVERT_EXPR_P (offset))
11873     offset = TREE_OPERAND (offset, 0);
11874 
11875   /* We must now have a BIT_AND_EXPR with a constant that is one less than
11876      a power of 2 and which is larger than BIGGEST_ALIGNMENT.  */
11877   if (TREE_CODE (offset) != BIT_AND_EXPR
11878       || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
11879       || compare_tree_int (TREE_OPERAND (offset, 1),
11880 			   BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
11881       || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
11882     return 0;
11883 
11884   /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
11885      It must be NEGATE_EXPR.  Then strip any more conversions.  */
11886   offset = TREE_OPERAND (offset, 0);
11887   while (CONVERT_EXPR_P (offset))
11888     offset = TREE_OPERAND (offset, 0);
11889 
11890   if (TREE_CODE (offset) != NEGATE_EXPR)
11891     return 0;
11892 
11893   offset = TREE_OPERAND (offset, 0);
11894   while (CONVERT_EXPR_P (offset))
11895     offset = TREE_OPERAND (offset, 0);
11896 
11897   /* This must now be the address of EXP.  */
11898   return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp;
11899 }
11900 
11901 /* Return a STRING_CST corresponding to ARG's constant initializer,
11902    either if it is a string constant or, when VALREP is set, if it is
11903    any other constant; return null otherwise.
11904    On success, set *PTR_OFFSET to the (possibly non-constant) byte offset
11905    within the byte string that ARG references.  If nonnull, set *MEM_SIZE
11906    to the size of the byte string.  If nonnull, set *DECL to the constant
11907    declaration ARG refers to.  */
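/* For example, given the declaration "const char a[] = "abc";", passing
   &a[1] as ARG yields the STRING_CST "abc" with *PTR_OFFSET set to 1,
   *MEM_SIZE (when requested) set to the tree constant 4, the size in
   bytes of the array including its terminating nul, and *DECL set to
   the VAR_DECL for "a".  */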
11908 
11909 static tree
11910 constant_byte_string (tree arg, tree *ptr_offset, tree *mem_size, tree *decl,
11911 		      bool valrep = false)
11912 {
11913   tree dummy = NULL_TREE;
11914   if (!mem_size)
11915     mem_size = &dummy;
11916 
11917   /* Store the type of the original expression before conversions
11918      via NOP_EXPR or POINTER_PLUS_EXPR to other types have been
11919      removed.  */
11920   tree argtype = TREE_TYPE (arg);
11921 
11922   tree array;
11923   STRIP_NOPS (arg);
11924 
11925   /* Non-constant index into the character array in an ARRAY_REF
11926      expression or null.  */
11927   tree varidx = NULL_TREE;
11928 
11929   poly_int64 base_off = 0;
11930 
11931   if (TREE_CODE (arg) == ADDR_EXPR)
11932     {
11933       arg = TREE_OPERAND (arg, 0);
11934       tree ref = arg;
11935       if (TREE_CODE (arg) == ARRAY_REF)
11936 	{
11937 	  tree idx = TREE_OPERAND (arg, 1);
11938 	  if (TREE_CODE (idx) != INTEGER_CST)
11939 	    {
11940 	      /* From a pointer (but not array) argument extract the variable
11941 		 index to prevent get_addr_base_and_unit_offset() from failing
11942 		 due to it.  Use it later to compute the non-constant offset
11943 		 into the string and return it to the caller.  */
11944 	      varidx = idx;
11945 	      ref = TREE_OPERAND (arg, 0);
11946 
11947 	      if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE)
11948 		return NULL_TREE;
11949 
11950 	      if (!integer_zerop (array_ref_low_bound (arg)))
11951 		return NULL_TREE;
11952 
11953 	      if (!integer_onep (array_ref_element_size (arg)))
11954 		return NULL_TREE;
11955 	    }
11956 	}
11957       array = get_addr_base_and_unit_offset (ref, &base_off);
11958       if (!array
11959 	  || (TREE_CODE (array) != VAR_DECL
11960 	      && TREE_CODE (array) != CONST_DECL
11961 	      && TREE_CODE (array) != STRING_CST))
11962 	return NULL_TREE;
11963     }
11964   else if (TREE_CODE (arg) == PLUS_EXPR || TREE_CODE (arg) == POINTER_PLUS_EXPR)
11965     {
11966       tree arg0 = TREE_OPERAND (arg, 0);
11967       tree arg1 = TREE_OPERAND (arg, 1);
11968 
11969       tree offset;
11970       tree str = string_constant (arg0, &offset, mem_size, decl);
11971       if (!str)
11972 	{
11973 	   str = string_constant (arg1, &offset, mem_size, decl);
11974 	   arg1 = arg0;
11975 	}
11976 
11977       if (str)
11978 	{
11979 	  /* Avoid pointers to arrays (see bug 86622).  */
11980 	  if (POINTER_TYPE_P (TREE_TYPE (arg))
11981 	      && TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) == ARRAY_TYPE
11982 	      && !(decl && !*decl)
11983 	      && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
11984 		   && tree_fits_uhwi_p (*mem_size)
11985 		   && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
11986 	    return NULL_TREE;
11987 
11988 	  tree type = TREE_TYPE (offset);
11989 	  arg1 = fold_convert (type, arg1);
11990 	  *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, arg1);
11991 	  return str;
11992 	}
11993       return NULL_TREE;
11994     }
11995   else if (TREE_CODE (arg) == SSA_NAME)
11996     {
11997       gimple *stmt = SSA_NAME_DEF_STMT (arg);
11998       if (!is_gimple_assign (stmt))
11999 	return NULL_TREE;
12000 
12001       tree rhs1 = gimple_assign_rhs1 (stmt);
12002       tree_code code = gimple_assign_rhs_code (stmt);
12003       if (code == ADDR_EXPR)
12004 	return string_constant (rhs1, ptr_offset, mem_size, decl);
12005       else if (code != POINTER_PLUS_EXPR)
12006 	return NULL_TREE;
12007 
12008       tree offset;
12009       if (tree str = string_constant (rhs1, &offset, mem_size, decl))
12010 	{
12011 	  /* Avoid pointers to arrays (see bug 86622).  */
12012 	  if (POINTER_TYPE_P (TREE_TYPE (rhs1))
12013 	      && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs1))) == ARRAY_TYPE
12014 	      && !(decl && !*decl)
12015 	      && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
12016 		   && tree_fits_uhwi_p (*mem_size)
12017 		   && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
12018 	    return NULL_TREE;
12019 
12020 	  tree rhs2 = gimple_assign_rhs2 (stmt);
12021 	  tree type = TREE_TYPE (offset);
12022 	  rhs2 = fold_convert (type, rhs2);
12023 	  *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, rhs2);
12024 	  return str;
12025 	}
12026       return NULL_TREE;
12027     }
12028   else if (DECL_P (arg))
12029     array = arg;
12030   else
12031     return NULL_TREE;
12032 
12033   tree offset = wide_int_to_tree (sizetype, base_off);
12034   if (varidx)
12035     {
12036       if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE)
12037 	return NULL_TREE;
12038 
12039       gcc_assert (TREE_CODE (arg) == ARRAY_REF);
12040       tree chartype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (arg, 0)));
12041       if (TREE_CODE (chartype) != INTEGER_TYPE)
12042 	return NULL;
12043 
12044       offset = fold_convert (sizetype, varidx);
12045     }
12046 
12047   if (TREE_CODE (array) == STRING_CST)
12048     {
12049       *ptr_offset = fold_convert (sizetype, offset);
12050       *mem_size = TYPE_SIZE_UNIT (TREE_TYPE (array));
12051       if (decl)
12052 	*decl = NULL_TREE;
12053       gcc_checking_assert (tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (array)))
12054 			   >= TREE_STRING_LENGTH (array));
12055       return array;
12056     }
12057 
12058   tree init = ctor_for_folding (array);
12059   if (!init || init == error_mark_node)
12060     return NULL_TREE;
12061 
12062   if (valrep)
12063     {
12064       HOST_WIDE_INT cstoff;
12065       if (!base_off.is_constant (&cstoff))
12066 	return NULL_TREE;
12067 
12068       /* Check that the host and target are sane.  */
12069       if (CHAR_BIT != 8 || BITS_PER_UNIT != 8)
12070 	return NULL_TREE;
12071 
12072       HOST_WIDE_INT typesz = int_size_in_bytes (TREE_TYPE (init));
12073       if (typesz <= 0 || (int) typesz != typesz)
12074 	return NULL_TREE;
12075 
12076       HOST_WIDE_INT size = typesz;
12077       if (VAR_P (array)
12078 	  && DECL_SIZE_UNIT (array)
12079 	  && tree_fits_shwi_p (DECL_SIZE_UNIT (array)))
12080 	{
12081 	  size = tree_to_shwi (DECL_SIZE_UNIT (array));
12082 	  gcc_checking_assert (size >= typesz);
12083 	}
12084 
12085       /* If the value representation was requested, convert the initializer
12086 	 for the whole array or object into a string of bytes forming
12087 	 its value representation and return it.  */
12088       unsigned char *bytes = XNEWVEC (unsigned char, size);
12089       int r = native_encode_initializer (init, bytes, size);
12090       if (r < typesz)
12091 	{
12092 	  XDELETEVEC (bytes);
12093 	  return NULL_TREE;
12094 	}
12095 
12096       if (r < size)
12097 	memset (bytes + r, '\0', size - r);
12098 
12099       const char *p = reinterpret_cast<const char *>(bytes);
12100       init = build_string_literal (size, p, char_type_node);
12101       init = TREE_OPERAND (init, 0);
12102       init = TREE_OPERAND (init, 0);
12103       XDELETE (bytes);
12104 
12105       *mem_size = size_int (TREE_STRING_LENGTH (init));
12106       *ptr_offset = wide_int_to_tree (ssizetype, base_off);
12107 
12108       if (decl)
12109 	*decl = array;
12110 
12111       return init;
12112     }
12113 
12114   if (TREE_CODE (init) == CONSTRUCTOR)
12115     {
12116       /* Convert the 64-bit constant offset to a wider type to avoid
12117 	 overflow and use it to obtain the initializer for the subobject
12118 	 it points into.  */
12119       offset_int wioff;
12120       if (!base_off.is_constant (&wioff))
12121 	return NULL_TREE;
12122 
12123       wioff *= BITS_PER_UNIT;
12124       if (!wi::fits_uhwi_p (wioff))
12125 	return NULL_TREE;
12126 
12127       base_off = wioff.to_uhwi ();
12128       unsigned HOST_WIDE_INT fieldoff = 0;
12129       init = fold_ctor_reference (TREE_TYPE (arg), init, base_off, 0, array,
12130 				  &fieldoff);
12131       if (!init || init == error_mark_node)
12132 	return NULL_TREE;
12133 
12134       HOST_WIDE_INT cstoff;
12135       if (!base_off.is_constant (&cstoff))
12136 	return NULL_TREE;
12137 
12138       cstoff = (cstoff - fieldoff) / BITS_PER_UNIT;
12139       tree off = build_int_cst (sizetype, cstoff);
12140       if (varidx)
12141 	offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, off);
12142       else
12143 	offset = off;
12144     }
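  /* E.g. if ARG points 6 bytes into an object whose relevant
     initialized field starts at bit offset 32, then BASE_OFF is 48
     bits, FIELDOFF is 32, and the remaining OFFSET into that
     subobject's initializer is (48 - 32) / 8 == 2 bytes.  */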
12145 
12146   *ptr_offset = offset;
12147 
12148   tree inittype = TREE_TYPE (init);
12149 
12150   if (TREE_CODE (init) == INTEGER_CST
12151       && (TREE_CODE (TREE_TYPE (array)) == INTEGER_TYPE
12152 	  || TYPE_MAIN_VARIANT (inittype) == char_type_node))
12153     {
12154       /* Check that the host and target are sane.  */
12155       if (CHAR_BIT != 8 || BITS_PER_UNIT != 8)
12156 	return NULL_TREE;
12157 
12158       /* For a reference to (address of) a single constant character,
12159 	 store the native representation of the character in CHARBUF.
12160 	 If the reference is to an element of an array or a member
12161 	 of a struct, only consider narrow characters until ctors
12162 	 for wide character arrays are transformed to STRING_CSTs
12163 	 like those for narrow arrays.  */
12164       unsigned char charbuf[MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT];
12165       int len = native_encode_expr (init, charbuf, sizeof charbuf, 0);
12166       if (len > 0)
12167 	{
12168 	  /* Construct a string literal with elements of INITTYPE and
12169 	     the representation above.  Then strip
12170 	     the ADDR_EXPR (ARRAY_REF (...)) around the STRING_CST.  */
12171 	  init = build_string_literal (len, (char *)charbuf, inittype);
12172 	  init = TREE_OPERAND (TREE_OPERAND (init, 0), 0);
12173 	}
12174     }
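  /* E.g. for a reference to "const char c = 'A';", INIT is the
     INTEGER_CST 65 (assuming ASCII) and is re-expressed here as the
     one-character STRING_CST "A", so callers can treat it like any
     other string constant.  */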
12175 
12176   tree initsize = TYPE_SIZE_UNIT (inittype);
12177 
12178   if (TREE_CODE (init) == CONSTRUCTOR && initializer_zerop (init))
12179     {
12180       /* Fold an empty/zero constructor for an implicitly initialized
12181 	 object or subobject into the empty string.  */
12182 
12183       /* Determine the character type from that of the original
12184 	 expression.  */
12185       tree chartype = argtype;
12186       if (POINTER_TYPE_P (chartype))
12187 	chartype = TREE_TYPE (chartype);
12188       while (TREE_CODE (chartype) == ARRAY_TYPE)
12189 	chartype = TREE_TYPE (chartype);
12190 
12191       if (INTEGRAL_TYPE_P (chartype)
12192 	  && TYPE_PRECISION (chartype) == TYPE_PRECISION (char_type_node))
12193 	{
12194 	  /* Convert a char array to an empty STRING_CST having an array
12195 	     of the expected type and size.  */
12196 	  if (!initsize)
12197 	    initsize = integer_zero_node;
12198 
12199 	  unsigned HOST_WIDE_INT size = tree_to_uhwi (initsize);
12200 	  if (size > (unsigned HOST_WIDE_INT) INT_MAX)
12201 	    return NULL_TREE;
12202 
12203 	  init = build_string_literal (size, NULL, chartype, size);
12204 	  init = TREE_OPERAND (init, 0);
12205 	  init = TREE_OPERAND (init, 0);
12206 
12207 	  *ptr_offset = integer_zero_node;
12208 	}
12209     }
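  /* In the zero-initializer case above, e.g. "const char a[4] = { };",
     INIT becomes an empty STRING_CST whose array type has size 4 and
     *PTR_OFFSET is zero.  */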
12210 
12211   if (decl)
12212     *decl = array;
12213 
12214   if (TREE_CODE (init) != STRING_CST)
12215     return NULL_TREE;
12216 
12217   *mem_size = initsize;
12218 
12219   gcc_checking_assert (tree_to_shwi (initsize) >= TREE_STRING_LENGTH (init));
12220 
12221   return init;
12222 }
12223 
12224 /* Return a STRING_CST if ARG corresponds to a string constant, or zero
12225    if it doesn't.  If we return nonzero, set *PTR_OFFSET to the (possibly
12226    non-constant) offset in bytes within the string that ARG is accessing.
12227    If MEM_SIZE is nonzero, the storage size of the memory is stored there.
12228    If DECL is nonzero, the constant declaration is stored there if available.  */
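/* E.g. for ARG == &"hello"[2] this returns the STRING_CST "hello" with
   *PTR_OFFSET set to 2 and *MEM_SIZE set to 6, the array size including
   the terminating NUL.  */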
12229 
12230 tree
12231 string_constant (tree arg, tree *ptr_offset, tree *mem_size, tree *decl)
12232 {
12233   return constant_byte_string (arg, ptr_offset, mem_size, decl, false);
12234 }
12235 
12236 /* Similar to string_constant, return a STRING_CST corresponding
12237    to the value representation of the first argument if it's
12238    a constant.  */
12239 
12240 tree
12241 byte_representation (tree arg, tree *ptr_offset, tree *mem_size, tree *decl)
12242 {
12243   return constant_byte_string (arg, ptr_offset, mem_size, decl, true);
12244 }
12245 
12246 /* Optimize x % C1 == C2 for signed modulo if C1 is a power of two and C2
12247    is non-zero.  With C3 = ((1 << (prec - 1)) | (C1 - 1)), transform it
12248    for C2 > 0 into (x & C3) == C2
12249    for C2 < 0 into (x & C3) == (C2 & C3).  */
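/* As a concrete sketch for prec == 32: x % 16 == 5 uses
   C3 = (1 << 31) | 15 == 0x8000000f and becomes (x & 0x8000000f) == 5,
   while x % 16 == -5 becomes (x & 0x8000000f) == 0x8000000b, i.e. the
   sign bit set and the low bits equal to 16 - 5.  */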
12250 enum tree_code
12251 maybe_optimize_pow2p_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
12252 {
12253   gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
12254   tree treeop0 = gimple_assign_rhs1 (stmt);
12255   tree treeop1 = gimple_assign_rhs2 (stmt);
12256   tree type = TREE_TYPE (*arg0);
12257   scalar_int_mode mode;
12258   if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
12259     return code;
12260   if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
12261       || TYPE_PRECISION (type) <= 1
12262       || TYPE_UNSIGNED (type)
12263       /* Signed x % c == 0 should have been optimized into unsigned modulo
12264 	 earlier.  */
12265       || integer_zerop (*arg1)
12266       /* If c is known to be non-negative, modulo will be expanded as unsigned
12267 	 modulo.  */
12268       || get_range_pos_neg (treeop0) == 1)
12269     return code;
12270 
12271   /* x % c == d where d < 0 && d <= -c should always be false.  */
12272   if (tree_int_cst_sgn (*arg1) == -1
12273       && -wi::to_widest (treeop1) >= wi::to_widest (*arg1))
12274     return code;
12275 
12276   int prec = TYPE_PRECISION (type);
12277   wide_int w = wi::to_wide (treeop1) - 1;
12278   w |= wi::shifted_mask (0, prec - 1, true, prec);
12279   tree c3 = wide_int_to_tree (type, w);
12280   tree c4 = *arg1;
12281   if (tree_int_cst_sgn (*arg1) == -1)
12282     c4 = wide_int_to_tree (type, w & wi::to_wide (*arg1));
12283 
12284   rtx op0 = expand_normal (treeop0);
12285   treeop0 = make_tree (TREE_TYPE (treeop0), op0);
12286 
12287   bool speed_p = optimize_insn_for_speed_p ();
12288 
12289   do_pending_stack_adjust ();
12290 
12291   location_t loc = gimple_location (stmt);
12292   struct separate_ops ops;
12293   ops.code = TRUNC_MOD_EXPR;
12294   ops.location = loc;
12295   ops.type = TREE_TYPE (treeop0);
12296   ops.op0 = treeop0;
12297   ops.op1 = treeop1;
12298   ops.op2 = NULL_TREE;
12299   start_sequence ();
12300   rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
12301 				EXPAND_NORMAL);
12302   rtx_insn *moinsns = get_insns ();
12303   end_sequence ();
12304 
12305   unsigned mocost = seq_cost (moinsns, speed_p);
12306   mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
12307   mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
12308 
12309   ops.code = BIT_AND_EXPR;
12310   ops.location = loc;
12311   ops.type = TREE_TYPE (treeop0);
12312   ops.op0 = treeop0;
12313   ops.op1 = c3;
12314   ops.op2 = NULL_TREE;
12315   start_sequence ();
12316   rtx mur = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
12317 				EXPAND_NORMAL);
12318   rtx_insn *muinsns = get_insns ();
12319   end_sequence ();
12320 
12321   unsigned mucost = seq_cost (muinsns, speed_p);
12322   mucost += rtx_cost (mur, mode, EQ, 0, speed_p);
12323   mucost += rtx_cost (expand_normal (c4), mode, EQ, 1, speed_p);
12324 
12325   if (mocost <= mucost)
12326     {
12327       emit_insn (moinsns);
12328       *arg0 = make_tree (TREE_TYPE (*arg0), mor);
12329       return code;
12330     }
12331 
12332   emit_insn (muinsns);
12333   *arg0 = make_tree (TREE_TYPE (*arg0), mur);
12334   *arg1 = c4;
12335   return code;
12336 }
12337 
12338 /* Attempt to optimize unsigned (X % C1) == C2 (or (X % C1) != C2).
12339    If C1 is odd, transform it to:
12340    (X - C2) * C3 <= C4 (or >), where
12341    C3 is modular multiplicative inverse of C1 and 1<<prec and
12342    C4 is ((1<<prec) - 1) / C1 or ((1<<prec) - 1) / C1 - 1 (the latter
12343    if C2 > ((1<<prec) - 1) % C1).
12344    If C1 is even and C2 is 0, with S = ctz (C1), use
12345    ((X * C3) r>> S) <= C4, where C3 is modular multiplicative
12346    inverse of C1>>S and 1<<prec and C4 is (((1<<prec) - 1) / (C1>>S)) >> S.
12347 
12348    For signed (X % C1) == 0, if C1 is odd, transform it to (all
12349    operations in it unsigned):
12350    (X * C3) + C4 <= 2 * C4, where
12351    C3 is modular multiplicative inverse of (unsigned) C1 and 1<<prec and
12352    C4 is ((1<<(prec - 1) - 1) / C1).
12353    If C1 is even, with S = ctz (C1), use
12354    ((X * C3) + C4) r>> S <= (C4 >> (S - 1))
12355    where C3 is modular multiplicative inverse of (unsigned)(C1>>S) and 1<<prec
12356    and C4 is ((1<<(prec - 1) - 1) / (C1>>S)) & (-1<<S).
12357 
12358    See the Hacker's Delight book, section 10-17.  */
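/* A minimal unsigned example with prec == 32: x % 3 == 0 becomes
   x * 0xaaaaaaab <= 0x55555555, since 0xaaaaaaab is the modular
   multiplicative inverse of 3 and 1<<32 and 0x55555555 is
   (0xffffffff / 3), while x % 3 == 1 becomes
   (x - 1) * 0xaaaaaaab <= 0x55555554.  */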
12359 enum tree_code
12360 maybe_optimize_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
12361 {
12362   gcc_checking_assert (code == EQ_EXPR || code == NE_EXPR);
12363   gcc_checking_assert (TREE_CODE (*arg1) == INTEGER_CST);
12364 
12365   if (optimize < 2)
12366     return code;
12367 
12368   gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
12369   if (stmt == NULL)
12370     return code;
12371 
12372   tree treeop0 = gimple_assign_rhs1 (stmt);
12373   tree treeop1 = gimple_assign_rhs2 (stmt);
12374   if (TREE_CODE (treeop0) != SSA_NAME
12375       || TREE_CODE (treeop1) != INTEGER_CST
12376       /* Don't optimize the undefined behavior case x % 0;
12377 	 x % 1 should have been optimized into zero, punt if
12378 	 it makes it here for whatever reason;
12379 	 x % -c should have been optimized into x % c.  */
12380       || compare_tree_int (treeop1, 2) <= 0
12381       /* Likewise x % c == d where d >= c should be always false.  */
12382       || tree_int_cst_le (treeop1, *arg1))
12383     return code;
12384 
12385   /* Unsigned x % pow2 is already handled correctly; for signed
12386      modulo, handle it in maybe_optimize_pow2p_mod_cmp.  */
12387   if (integer_pow2p (treeop1))
12388     return maybe_optimize_pow2p_mod_cmp (code, arg0, arg1);
12389 
12390   tree type = TREE_TYPE (*arg0);
12391   scalar_int_mode mode;
12392   if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
12393     return code;
12394   if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
12395       || TYPE_PRECISION (type) <= 1)
12396     return code;
12397 
12398   signop sgn = UNSIGNED;
12399   /* If both operands are known to have the sign bit clear, handle
12400      even the signed modulo case as unsigned.  treeop1 is always
12401      positive >= 2, checked above.  */
12402   if (!TYPE_UNSIGNED (type) && get_range_pos_neg (treeop0) != 1)
12403     sgn = SIGNED;
12404 
12405   if (!TYPE_UNSIGNED (type))
12406     {
12407       if (tree_int_cst_sgn (*arg1) == -1)
12408 	return code;
12409       type = unsigned_type_for (type);
12410       if (!type || TYPE_MODE (type) != TYPE_MODE (TREE_TYPE (*arg0)))
12411 	return code;
12412     }
12413 
12414   int prec = TYPE_PRECISION (type);
12415   wide_int w = wi::to_wide (treeop1);
12416   int shift = wi::ctz (w);
12417   /* Unsigned (X % C1) == C2 is equivalent to (X - C2) % C1 == 0 if
12418      C2 <= -1U % C1, because for any Z >= 0U - C2 in that case (Z % C1) != 0.
12419      If C1 is odd, we can handle all cases by subtracting
12420      C4 below.  We could handle even the even C1 and C2 > -1U % C1 cases
12421      e.g. by testing for overflow on the subtraction, punt on that for now
12422      though.  */
12423   if ((sgn == SIGNED || shift) && !integer_zerop (*arg1))
12424     {
12425       if (sgn == SIGNED)
12426 	return code;
12427       wide_int x = wi::umod_trunc (wi::mask (prec, false, prec), w);
12428       if (wi::gtu_p (wi::to_wide (*arg1), x))
12429 	return code;
12430     }
12431 
12432   imm_use_iterator imm_iter;
12433   use_operand_p use_p;
12434   FOR_EACH_IMM_USE_FAST (use_p, imm_iter, treeop0)
12435     {
12436       gimple *use_stmt = USE_STMT (use_p);
12437       /* Punt if treeop0 is used in the same bb in a division
12438 	 or another modulo with the same divisor.  We should expect
12439 	 the division and modulo combined together.  */
12440       if (use_stmt == stmt
12441 	  || gimple_bb (use_stmt) != gimple_bb (stmt))
12442 	continue;
12443       if (!is_gimple_assign (use_stmt)
12444 	  || (gimple_assign_rhs_code (use_stmt) != TRUNC_DIV_EXPR
12445 	      && gimple_assign_rhs_code (use_stmt) != TRUNC_MOD_EXPR))
12446 	continue;
12447       if (gimple_assign_rhs1 (use_stmt) != treeop0
12448 	  || !operand_equal_p (gimple_assign_rhs2 (use_stmt), treeop1, 0))
12449 	continue;
12450       return code;
12451     }
12452 
12453   w = wi::lrshift (w, shift);
12454   wide_int a = wide_int::from (w, prec + 1, UNSIGNED);
12455   wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
12456   wide_int m = wide_int::from (wi::mod_inv (a, b), prec, UNSIGNED);
12457   tree c3 = wide_int_to_tree (type, m);
12458   tree c5 = NULL_TREE;
12459   wide_int d, e;
12460   if (sgn == UNSIGNED)
12461     {
12462       d = wi::divmod_trunc (wi::mask (prec, false, prec), w, UNSIGNED, &e);
12463       /* Use <= floor ((1<<prec) - 1) / C1 only if C2 <= ((1<<prec) - 1) % C1,
12464 	 otherwise use < or subtract one from C4.  E.g. for
12465 	 x % 3U == 0 we transform this into x * 0xaaaaaaab <= 0x55555555, but
12466 	 x % 3U == 1 already needs to be
12467 	 (x - 1) * 0xaaaaaaabU <= 0x55555554.  */
12468       if (!shift && wi::gtu_p (wi::to_wide (*arg1), e))
12469 	d -= 1;
12470       if (shift)
12471 	d = wi::lrshift (d, shift);
12472     }
12473   else
12474     {
12475       e = wi::udiv_trunc (wi::mask (prec - 1, false, prec), w);
12476       if (!shift)
12477 	d = wi::lshift (e, 1);
12478       else
12479 	{
12480 	  e = wi::bit_and (e, wi::mask (shift, true, prec));
12481 	  d = wi::lrshift (e, shift - 1);
12482 	}
12483       c5 = wide_int_to_tree (type, e);
12484     }
12485   tree c4 = wide_int_to_tree (type, d);
12486 
12487   rtx op0 = expand_normal (treeop0);
12488   treeop0 = make_tree (TREE_TYPE (treeop0), op0);
12489 
12490   bool speed_p = optimize_insn_for_speed_p ();
12491 
12492   do_pending_stack_adjust ();
12493 
12494   location_t loc = gimple_location (stmt);
12495   struct separate_ops ops;
12496   ops.code = TRUNC_MOD_EXPR;
12497   ops.location = loc;
12498   ops.type = TREE_TYPE (treeop0);
12499   ops.op0 = treeop0;
12500   ops.op1 = treeop1;
12501   ops.op2 = NULL_TREE;
12502   start_sequence ();
12503   rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
12504 				EXPAND_NORMAL);
12505   rtx_insn *moinsns = get_insns ();
12506   end_sequence ();
12507 
12508   unsigned mocost = seq_cost (moinsns, speed_p);
12509   mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
12510   mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
12511 
12512   tree t = fold_convert_loc (loc, type, treeop0);
12513   if (!integer_zerop (*arg1))
12514     t = fold_build2_loc (loc, MINUS_EXPR, type, t, fold_convert (type, *arg1));
12515   t = fold_build2_loc (loc, MULT_EXPR, type, t, c3);
12516   if (sgn == SIGNED)
12517     t = fold_build2_loc (loc, PLUS_EXPR, type, t, c5);
12518   if (shift)
12519     {
12520       tree s = build_int_cst (NULL_TREE, shift);
12521       t = fold_build2_loc (loc, RROTATE_EXPR, type, t, s);
12522     }
12523 
12524   start_sequence ();
12525   rtx mur = expand_normal (t);
12526   rtx_insn *muinsns = get_insns ();
12527   end_sequence ();
12528 
12529   unsigned mucost = seq_cost (muinsns, speed_p);
12530   mucost += rtx_cost (mur, mode, LE, 0, speed_p);
12531   mucost += rtx_cost (expand_normal (c4), mode, LE, 1, speed_p);
12532 
12533   if (mocost <= mucost)
12534     {
12535       emit_insn (moinsns);
12536       *arg0 = make_tree (TREE_TYPE (*arg0), mor);
12537       return code;
12538     }
12539 
12540   emit_insn (muinsns);
12541   *arg0 = make_tree (type, mur);
12542   *arg1 = c4;
12543   return code == EQ_EXPR ? LE_EXPR : GT_EXPR;
12544 }
12545 
12546 /* Optimize x - y < 0 into x < y if x - y has undefined overflow.  */
12547 
12548 void
12549 maybe_optimize_sub_cmp_0 (enum tree_code code, tree *arg0, tree *arg1)
12550 {
12551   gcc_checking_assert (code == GT_EXPR || code == GE_EXPR
12552 		       || code == LT_EXPR || code == LE_EXPR);
12553   gcc_checking_assert (integer_zerop (*arg1));
12554 
12555   if (!optimize)
12556     return;
12557 
12558   gimple *stmt = get_def_for_expr (*arg0, MINUS_EXPR);
12559   if (stmt == NULL)
12560     return;
12561 
12562   tree treeop0 = gimple_assign_rhs1 (stmt);
12563   tree treeop1 = gimple_assign_rhs2 (stmt);
12564   if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (treeop0)))
12565     return;
12566 
12567   if (issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_COMPARISON))
12568     warning_at (gimple_location (stmt), OPT_Wstrict_overflow,
12569 		"assuming signed overflow does not occur when "
12570 		"simplifying %<X - Y %s 0%> to %<X %s Y%>",
12571 		op_symbol_code (code), op_symbol_code (code));
12572 
12573   *arg0 = treeop0;
12574   *arg1 = treeop1;
12575 }
12576 
12577 /* Generate code to calculate OPS, an exploded expression,
12578    using a store-flag instruction and return an rtx for the result.
12579    OPS reflects a comparison.
12580 
12581    If TARGET is nonzero, store the result there if convenient.
12582 
12583    Return zero if there is no suitable set-flag instruction
12584    available on this machine.
12585 
12586    Once expand_expr has been called on the arguments of the comparison,
12587    we are committed to doing the store flag, since it is not safe to
12588    re-evaluate the expression.  We emit the store-flag insn by calling
12589    emit_store_flag, but only expand the arguments if we have a reason
12590    to believe that emit_store_flag will be successful.  If we think that
12591    it will, but it isn't, we have to simulate the store-flag with a
12592    set/jump/set sequence.  */
12593 
12594 static rtx
12595 do_store_flag (sepops ops, rtx target, machine_mode mode)
12596 {
12597   enum rtx_code code;
12598   tree arg0, arg1, type;
12599   machine_mode operand_mode;
12600   int unsignedp;
12601   rtx op0, op1;
12602   rtx subtarget = target;
12603   location_t loc = ops->location;
12604 
12605   arg0 = ops->op0;
12606   arg1 = ops->op1;
12607 
12608   /* Don't crash if the comparison was erroneous.  */
12609   if (arg0 == error_mark_node || arg1 == error_mark_node)
12610     return const0_rtx;
12611 
12612   type = TREE_TYPE (arg0);
12613   operand_mode = TYPE_MODE (type);
12614   unsignedp = TYPE_UNSIGNED (type);
12615 
12616   /* We won't bother with BLKmode store-flag operations because it would mean
12617      passing a lot of information to emit_store_flag.  */
12618   if (operand_mode == BLKmode)
12619     return 0;
12620 
12621   /* We won't bother with store-flag operations involving function pointers
12622      when function pointers must be canonicalized before comparisons.  */
12623   if (targetm.have_canonicalize_funcptr_for_compare ()
12624       && ((POINTER_TYPE_P (TREE_TYPE (arg0))
12625 	   && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg0))))
12626 	  || (POINTER_TYPE_P (TREE_TYPE (arg1))
12627 	      && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg1))))))
12628     return 0;
12629 
12630   STRIP_NOPS (arg0);
12631   STRIP_NOPS (arg1);
12632 
12633   /* For vector typed comparisons emit code to generate the desired
12634      all-ones or all-zeros mask.  */
12635   if (TREE_CODE (ops->type) == VECTOR_TYPE)
12636     {
12637       tree ifexp = build2 (ops->code, ops->type, arg0, arg1);
12638       if (VECTOR_BOOLEAN_TYPE_P (ops->type)
12639 	  && expand_vec_cmp_expr_p (TREE_TYPE (arg0), ops->type, ops->code))
12640 	return expand_vec_cmp_expr (ops->type, ifexp, target);
12641       else
12642 	gcc_unreachable ();
12643     }
12644 
12645   /* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
12646      into (x - C2) * C3 < C4.  */
12647   if ((ops->code == EQ_EXPR || ops->code == NE_EXPR)
12648       && TREE_CODE (arg0) == SSA_NAME
12649       && TREE_CODE (arg1) == INTEGER_CST)
12650     {
12651       enum tree_code new_code = maybe_optimize_mod_cmp (ops->code,
12652 							&arg0, &arg1);
12653       if (new_code != ops->code)
12654 	{
12655 	  struct separate_ops nops = *ops;
12656 	  nops.code = ops->code = new_code;
12657 	  nops.op0 = arg0;
12658 	  nops.op1 = arg1;
12659 	  nops.type = TREE_TYPE (arg0);
12660 	  return do_store_flag (&nops, target, mode);
12661 	}
12662     }
12663 
12664   /* Optimize (x - y) < 0 into x < y if x - y has undefined overflow.  */
12665   if (!unsignedp
12666       && (ops->code == LT_EXPR || ops->code == LE_EXPR
12667 	  || ops->code == GT_EXPR || ops->code == GE_EXPR)
12668       && integer_zerop (arg1)
12669       && TREE_CODE (arg0) == SSA_NAME)
12670     maybe_optimize_sub_cmp_0 (ops->code, &arg0, &arg1);
12671 
12672   /* Get the rtx comparison code to use.  We know that EXP is a comparison
12673      operation of some type.  Some comparisons against 1 and -1 can be
12674      converted to comparisons with zero.  Do so here so that the tests
12675      below will be aware that we have a comparison with zero.   These
12676      tests will not catch constants in the first operand, but constants
12677      are rarely passed as the first operand.  */
12678 
12679   switch (ops->code)
12680     {
12681     case EQ_EXPR:
12682       code = EQ;
12683       break;
12684     case NE_EXPR:
12685       code = NE;
12686       break;
12687     case LT_EXPR:
12688       if (integer_onep (arg1))
12689 	arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
12690       else
12691 	code = unsignedp ? LTU : LT;
12692       break;
12693     case LE_EXPR:
12694       if (! unsignedp && integer_all_onesp (arg1))
12695 	arg1 = integer_zero_node, code = LT;
12696       else
12697 	code = unsignedp ? LEU : LE;
12698       break;
12699     case GT_EXPR:
12700       if (! unsignedp && integer_all_onesp (arg1))
12701 	arg1 = integer_zero_node, code = GE;
12702       else
12703 	code = unsignedp ? GTU : GT;
12704       break;
12705     case GE_EXPR:
12706       if (integer_onep (arg1))
12707 	arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
12708       else
12709 	code = unsignedp ? GEU : GE;
12710       break;
12711 
12712     case UNORDERED_EXPR:
12713       code = UNORDERED;
12714       break;
12715     case ORDERED_EXPR:
12716       code = ORDERED;
12717       break;
12718     case UNLT_EXPR:
12719       code = UNLT;
12720       break;
12721     case UNLE_EXPR:
12722       code = UNLE;
12723       break;
12724     case UNGT_EXPR:
12725       code = UNGT;
12726       break;
12727     case UNGE_EXPR:
12728       code = UNGE;
12729       break;
12730     case UNEQ_EXPR:
12731       code = UNEQ;
12732       break;
12733     case LTGT_EXPR:
12734       code = LTGT;
12735       break;
12736 
12737     default:
12738       gcc_unreachable ();
12739     }
12740 
12741   /* Put a constant second.  */
12742   if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST
12743       || TREE_CODE (arg0) == FIXED_CST)
12744     {
12745       std::swap (arg0, arg1);
12746       code = swap_condition (code);
12747     }
12748 
12749   /* If this is an equality or inequality test of a single bit, we can
12750      do this by shifting the bit being tested to the low-order bit and
12751      masking the result with the constant 1.  If the condition was EQ,
12752      we xor it with 1.  This does not require an scc insn and is faster
12753      than an scc insn even if we have it.
12754 
12755      The code to make this transformation was moved into fold_single_bit_test,
12756      so we just call into the folder and expand its result.  */
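  /* Roughly, (x & 8) != 0 is expanded as (x >> 3) & 1, and
     (x & 8) == 0 as ((x >> 3) & 1) ^ 1.  */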
12757 
12758   if ((code == NE || code == EQ)
12759       && integer_zerop (arg1)
12760       && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
12761     {
12762       gimple *srcstmt = get_def_for_expr (arg0, BIT_AND_EXPR);
12763       if (srcstmt
12764 	  && integer_pow2p (gimple_assign_rhs2 (srcstmt)))
12765 	{
12766 	  enum tree_code tcode = code == NE ? NE_EXPR : EQ_EXPR;
12767 	  type = lang_hooks.types.type_for_mode (mode, unsignedp);
12768 	  tree temp = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg1),
12769 				       gimple_assign_rhs1 (srcstmt),
12770 				       gimple_assign_rhs2 (srcstmt));
12771 	  temp = fold_single_bit_test (loc, tcode, temp, arg1, type);
12772 	  if (temp)
12773 	    return expand_expr (temp, target, VOIDmode, EXPAND_NORMAL);
12774 	}
12775     }
12776 
12777   if (! get_subtarget (target)
12778       || GET_MODE (subtarget) != operand_mode)
12779     subtarget = 0;
12780 
12781   expand_operands (arg0, arg1, subtarget, &op0, &op1, EXPAND_NORMAL);
12782 
12783   if (target == 0)
12784     target = gen_reg_rtx (mode);
12785 
12786   /* Try a cstore if possible.  */
12787   return emit_store_flag_force (target, code, op0, op1,
12788 				operand_mode, unsignedp,
12789 				(TYPE_PRECISION (ops->type) == 1
12790 				 && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
12791 }
12792 
12793 /* Attempt to generate a casesi instruction.  Returns 1 if successful,
12794    0 otherwise (i.e. if there is no casesi instruction).
12795 
12796    DEFAULT_PROBABILITY is the probability of jumping to the default
12797    label.  */
12798 int
12799 try_casesi (tree index_type, tree index_expr, tree minval, tree range,
12800 	    rtx table_label, rtx default_label, rtx fallback_label,
12801             profile_probability default_probability)
12802 {
12803   class expand_operand ops[5];
12804   scalar_int_mode index_mode = SImode;
12805   rtx op1, op2, index;
12806 
12807   if (! targetm.have_casesi ())
12808     return 0;
12809 
12810   /* The index must be some form of integer.  Convert it to SImode.  */
12811   scalar_int_mode omode = SCALAR_INT_TYPE_MODE (index_type);
12812   if (GET_MODE_BITSIZE (omode) > GET_MODE_BITSIZE (index_mode))
12813     {
12814       rtx rangertx = expand_normal (range);
12815 
12816       /* We must handle the endpoints in the original mode.  */
12817       index_expr = build2 (MINUS_EXPR, index_type,
12818 			   index_expr, minval);
12819       minval = integer_zero_node;
12820       index = expand_normal (index_expr);
12821       if (default_label)
12822         emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
12823 				 omode, 1, default_label,
12824                                  default_probability);
12825       /* Now we can safely truncate.  */
12826       index = convert_to_mode (index_mode, index, 0);
12827     }
12828   else
12829     {
12830       if (omode != index_mode)
12831 	{
12832 	  index_type = lang_hooks.types.type_for_mode (index_mode, 0);
12833 	  index_expr = fold_convert (index_type, index_expr);
12834 	}
12835 
12836       index = expand_normal (index_expr);
12837     }
12838 
12839   do_pending_stack_adjust ();
12840 
12841   op1 = expand_normal (minval);
12842   op2 = expand_normal (range);
12843 
12844   create_input_operand (&ops[0], index, index_mode);
12845   create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
12846   create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
12847   create_fixed_operand (&ops[3], table_label);
12848   create_fixed_operand (&ops[4], (default_label
12849 				  ? default_label
12850 				  : fallback_label));
12851   expand_jump_insn (targetm.code_for_casesi, 5, ops);
12852   return 1;
12853 }
12854 
12855 /* Attempt to generate a tablejump instruction; same concept as try_casesi.  */
12856 /* Subroutine of the next function.
12857 
12858    INDEX is the value being switched on, with the lowest value
12859    in the table already subtracted.
12860    MODE is its expected mode (needed if INDEX is constant).
12861    RANGE is the length of the jump table.
12862    TABLE_LABEL is a CODE_LABEL rtx for the table itself.
12863 
12864    DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
12865    index value is out of range.
12866    DEFAULT_PROBABILITY is the probability of jumping to
12867    the default label.  */
12868 
12869 static void
12870 do_tablejump (rtx index, machine_mode mode, rtx range, rtx table_label,
12871 	      rtx default_label, profile_probability default_probability)
12872 {
12873   rtx temp, vector;
12874 
12875   if (INTVAL (range) > cfun->cfg->max_jumptable_ents)
12876     cfun->cfg->max_jumptable_ents = INTVAL (range);
12877 
12878   /* Do an unsigned comparison (in the proper mode) between the index
12879      expression and the value which represents the length of the range.
12880      Since we just finished subtracting the lower bound of the range
12881      from the index expression, this comparison allows us to simultaneously
12882      check that the original index expression value is both greater than
12883      or equal to the minimum value of the range and less than or equal to
12884      the maximum value of the range.  */
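  /* E.g. for case values 5 .. 9, INDEX has already had 5 subtracted,
     so a single unsigned INDEX > 4 check catches both original values
     below 5 (which wrap to large unsigned numbers) and values
     above 9.  */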
12885 
12886   if (default_label)
12887     emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
12888 			     default_label, default_probability);
12889 
12890   /* If index is in range, it must fit in Pmode.
12891      Convert to Pmode so we can index with it.  */
12892   if (mode != Pmode)
12893     {
12894       unsigned int width;
12895 
12896       /* We know the value of INDEX is between 0 and RANGE.  If we have a
12897 	 sign-extended subreg, and RANGE does not have the sign bit set, then
12898 	 we have a value that is valid for both sign and zero extension.  In
12899 	 this case, we get better code if we sign extend.  */
12900       if (GET_CODE (index) == SUBREG
12901 	  && SUBREG_PROMOTED_VAR_P (index)
12902 	  && SUBREG_PROMOTED_SIGNED_P (index)
12903 	  && ((width = GET_MODE_PRECISION (as_a <scalar_int_mode> (mode)))
12904 	      <= HOST_BITS_PER_WIDE_INT)
12905 	  && ! (UINTVAL (range) & (HOST_WIDE_INT_1U << (width - 1))))
12906 	index = convert_to_mode (Pmode, index, 0);
12907       else
12908 	index = convert_to_mode (Pmode, index, 1);
12909     }
12910 
12911   /* Don't let a MEM slip through, because then INDEX that comes
12912      out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
12913      and break_out_memory_refs will go to work on it and mess it up.  */
12914 #ifdef PIC_CASE_VECTOR_ADDRESS
12915   if (flag_pic && !REG_P (index))
12916     index = copy_to_mode_reg (Pmode, index);
12917 #endif
12918 
12919   /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
12920      GET_MODE_SIZE, because this indicates how large insns are.  The other
12921      uses should all be Pmode, because they are addresses.  This code
12922      could fail if addresses and insns are not the same size.  */
12923   index = simplify_gen_binary (MULT, Pmode, index,
12924 			       gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE),
12925 					     Pmode));
12926   index = simplify_gen_binary (PLUS, Pmode, index,
12927 			       gen_rtx_LABEL_REF (Pmode, table_label));
12928 
12929 #ifdef PIC_CASE_VECTOR_ADDRESS
12930   if (flag_pic)
12931     index = PIC_CASE_VECTOR_ADDRESS (index);
12932   else
12933 #endif
12934     index = memory_address (CASE_VECTOR_MODE, index);
12935   temp = gen_reg_rtx (CASE_VECTOR_MODE);
12936   vector = gen_const_mem (CASE_VECTOR_MODE, index);
12937   convert_move (temp, vector, 0);
12938 
12939   emit_jump_insn (targetm.gen_tablejump (temp, table_label));
12940 
12941   /* If we are generating PIC code or if the table is PC-relative, the
12942      table and JUMP_INSN must be adjacent, so don't output a BARRIER.  */
12943   if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
12944     emit_barrier ();
12945 }
12946 
12947 int
12948 try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
12949 	       rtx table_label, rtx default_label,
12950 	       profile_probability default_probability)
12951 {
12952   rtx index;
12953 
12954   if (! targetm.have_tablejump ())
12955     return 0;
12956 
12957   index_expr = fold_build2 (MINUS_EXPR, index_type,
12958 			    fold_convert (index_type, index_expr),
12959 			    fold_convert (index_type, minval));
12960   index = expand_normal (index_expr);
12961   do_pending_stack_adjust ();
12962 
12963   do_tablejump (index, TYPE_MODE (index_type),
12964 		convert_modes (TYPE_MODE (index_type),
12965 			       TYPE_MODE (TREE_TYPE (range)),
12966 			       expand_normal (range),
12967 			       TYPE_UNSIGNED (TREE_TYPE (range))),
12968 		table_label, default_label, default_probability);
12969   return 1;
12970 }
12971 
12972 /* Return a CONST_VECTOR rtx representing vector mask for
12973    a VECTOR_CST of booleans.  */
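/* E.g. a four-element boolean VECTOR_CST {1, 0, 1, 1} becomes a
   CONST_VECTOR {-1, 0, -1, -1} in the vector's inner mode, the all-ones
   value standing for "true".  */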
12974 static rtx
12975 const_vector_mask_from_tree (tree exp)
12976 {
12977   machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
12978   machine_mode inner = GET_MODE_INNER (mode);
12979 
12980   rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
12981 			      VECTOR_CST_NELTS_PER_PATTERN (exp));
12982   unsigned int count = builder.encoded_nelts ();
12983   for (unsigned int i = 0; i < count; ++i)
12984     {
12985       tree elt = VECTOR_CST_ELT (exp, i);
12986       gcc_assert (TREE_CODE (elt) == INTEGER_CST);
12987       if (integer_zerop (elt))
12988 	builder.quick_push (CONST0_RTX (inner));
12989       else if (integer_onep (elt)
12990 	       || integer_minus_onep (elt))
12991 	builder.quick_push (CONSTM1_RTX (inner));
12992       else
12993 	gcc_unreachable ();
12994     }
12995   return builder.build ();
12996 }
12997 
12998 /* Return a CONST_VECTOR rtx for a VECTOR_CST tree.  */
12999 static rtx
13000 const_vector_from_tree (tree exp)
13001 {
13002   machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
13003 
13004   if (initializer_zerop (exp))
13005     return CONST0_RTX (mode);
13006 
13007   if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
13008     return const_vector_mask_from_tree (exp);
13009 
13010   machine_mode inner = GET_MODE_INNER (mode);
13011 
13012   rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
13013 			      VECTOR_CST_NELTS_PER_PATTERN (exp));
13014   unsigned int count = builder.encoded_nelts ();
13015   for (unsigned int i = 0; i < count; ++i)
13016     {
13017       tree elt = VECTOR_CST_ELT (exp, i);
13018       if (TREE_CODE (elt) == REAL_CST)
13019 	builder.quick_push (const_double_from_real_value (TREE_REAL_CST (elt),
13020 							  inner));
13021       else if (TREE_CODE (elt) == FIXED_CST)
13022 	builder.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
13023 							  inner));
13024       else
13025 	builder.quick_push (immed_wide_int_const (wi::to_poly_wide (elt),
13026 						  inner));
13027     }
13028   return builder.build ();
13029 }
13030 
13031 /* Build a decl for a personality function given a language prefix.  */
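/* For instance, with LANG "gxx" and DWARF2 unwind info this declares
   __gxx_personality_v0, the C++ personality routine.  */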
13032 
13033 tree
13034 build_personality_function (const char *lang)
13035 {
13036   const char *unwind_and_version;
13037   tree decl, type;
13038   char *name;
13039 
13040   switch (targetm_common.except_unwind_info (&global_options))
13041     {
13042     case UI_NONE:
13043       return NULL;
13044     case UI_SJLJ:
13045       unwind_and_version = "_sj0";
13046       break;
13047     case UI_DWARF2:
13048     case UI_TARGET:
13049       unwind_and_version = "_v0";
13050       break;
13051     case UI_SEH:
13052       unwind_and_version = "_seh0";
13053       break;
13054     default:
13055       gcc_unreachable ();
13056     }
13057 
13058   name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
13059 
13060   type = build_function_type_list (unsigned_type_node,
13061 				   integer_type_node, integer_type_node,
13062 				   long_long_unsigned_type_node,
13063 				   ptr_type_node, ptr_type_node, NULL_TREE);
13064   decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
13065 		     get_identifier (name), type);
13066   DECL_ARTIFICIAL (decl) = 1;
13067   DECL_EXTERNAL (decl) = 1;
13068   TREE_PUBLIC (decl) = 1;
13069 
13070   /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
13071      are the flags assigned by targetm.encode_section_info.  */
13072   SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
13073 
13074   return decl;
13075 }
13076 
13077 /* Extracts the personality function of DECL and returns the corresponding
13078    libfunc.  */
13079 
13080 rtx
13081 get_personality_function (tree decl)
13082 {
13083   tree personality = DECL_FUNCTION_PERSONALITY (decl);
13084   enum eh_personality_kind pk;
13085 
13086   pk = function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl));
13087   if (pk == eh_personality_none)
13088     return NULL;
13089 
13090   if (!personality
13091       && pk == eh_personality_any)
13092     personality = lang_hooks.eh_personality ();
13093 
13094   if (pk == eh_personality_lang)
13095     gcc_assert (personality != NULL_TREE);
13096 
13097   return XEXP (DECL_RTL (personality), 0);
13098 }
13099 
13100 /* Returns a tree for the size of EXP in bytes.  */
13101 
13102 static tree
13103 tree_expr_size (const_tree exp)
13104 {
13105   if (DECL_P (exp)
13106       && DECL_SIZE_UNIT (exp) != 0)
13107     return DECL_SIZE_UNIT (exp);
13108   else
13109     return size_in_bytes (TREE_TYPE (exp));
13110 }
13111 
13112 /* Return an rtx for the size in bytes of the value of EXP.  */
13113 
13114 rtx
13115 expr_size (tree exp)
13116 {
13117   tree size;
13118 
13119   if (TREE_CODE (exp) == WITH_SIZE_EXPR)
13120     size = TREE_OPERAND (exp, 1);
13121   else
13122     {
13123       size = tree_expr_size (exp);
13124       gcc_assert (size);
13125       gcc_assert (size == SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, exp));
13126     }
13127 
13128   return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), EXPAND_NORMAL);
13129 }
13130 
13131 /* Return a wide integer for the size in bytes of the value of EXP, or -1
13132    if the size can vary or is larger than an integer.  */
13133 
13134 static HOST_WIDE_INT
13135 int_expr_size (tree exp)
13136 {
13137   tree size;
13138 
13139   if (TREE_CODE (exp) == WITH_SIZE_EXPR)
13140     size = TREE_OPERAND (exp, 1);
13141   else
13142     {
13143       size = tree_expr_size (exp);
13144       gcc_assert (size);
13145     }
13146 
13147   if (size == 0 || !tree_fits_shwi_p (size))
13148     return -1;
13149 
13150   return tree_to_shwi (size);
13151 }
13152