/* Internal functions.
   Copyright (C) 2011-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "stringpool.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "expmed.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "dojump.h"
#include "expr.h"
#include "ubsan.h"
#include "recog.h"
#include "builtins.h"
#include "optabs-tree.h"

/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
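
/* An illustrative sketch (FOO and its ".R." fnspec are hypothetical):
   given an internal-fn.def entry of the shape

     DEF_INTERNAL_FN (FOO, ECF_LEAF | ECF_NOTHROW, ".R.")

   the macro above expands inside init_internal_fns to roughly

     if (".R.")
       internal_fn_fnspec_array[IFN_FOO]
	 = build_string ((int) sizeof (".R."), ".R.");

   so after the call every function that has a fnspec holds a
   STRING_CST describing its argument semantics, and the rest stay
   NULL.  */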

/* Create static initializers for the information returned by
   direct_internal_fn.  */
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }

const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
  not_direct
};
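
/* A note on the triples above (internal-fn.h has the authoritative
   definition of direct_internal_fn_info): the first two fields say
   where the optab's one or two modes come from -- a value >= 0 is the
   number of the call argument whose type supplies the mode, -1 means
   the return type, and -2 marks functions with no direct optab.  The
   last field is the vectorizable flag.  For example, mask_load_direct
   { -1, 2, false } takes its first mode from the return type and its
   second from argument 2 (the mask).  */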

/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  return convert_optab_handler (optab, imode, vmode);
}
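
/* For instance (an illustration assuming an AArch64-style target): a
   LOAD_LANES of two V4SI vectors gives an array type whose mode is a
   256-bit integer mode such as OImode, so the lookup above becomes
   roughly convert_optab_handler (vec_load_lanes_optab, OImode,
   V4SImode), yielding the target's ld2-style pattern if it defines
   one and CODE_FOR_nothing otherwise.  */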

/* Expand LOAD_LANES call STMT using optab OPTAB.  */

static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}

/* Expand STORE_LANES call STMT using optab OPTAB.  */

static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}

static void
expand_ANNOTATE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_USE_SIMT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Allocate per-lane storage and begin non-uniform execution region.  */

static void
expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
{
  rtx target;
  tree lhs = gimple_call_lhs (stmt);
  if (lhs)
    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  else
    target = gen_reg_rtx (Pmode);
  rtx size = expand_normal (gimple_call_arg (stmt, 0));
  rtx align = expand_normal (gimple_call_arg (stmt, 1));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, Pmode);
  create_input_operand (&ops[1], size, Pmode);
  create_input_operand (&ops[2], align, Pmode);
  gcc_assert (targetm.have_omp_simt_enter ());
  expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
}

/* Deallocate per-lane storage and leave non-uniform execution region.  */

static void
expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
{
  gcc_checking_assert (!gimple_call_lhs (stmt));
  rtx arg = expand_normal (gimple_call_arg (stmt, 0));
  struct expand_operand ops[1];
  create_input_operand (&ops[0], arg, Pmode);
  gcc_assert (targetm.have_omp_simt_exit ());
  expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
}

/* Lane index on SIMT targets: thread index in the warp on NVPTX.  On targets
   without SIMT execution this should be expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (targetm.have_omp_simt_lane ());
  emit_insn (targetm.gen_omp_simt_lane (target));
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Lane index of the first SIMT lane that supplies a non-zero argument.
   This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
   lane that executed the last iteration for handling OpenMP lastprivate.  */

static void
expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_last_lane ());
  expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
}

/* Non-transparent predicate used in SIMT lowering of OpenMP "ordered".  */

static void
expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], ctr, mode);
  gcc_assert (targetm.have_omp_simt_ordered ());
  expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
}

/* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
   any lane supplies a non-zero argument.  */

static void
expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx cond = expand_normal (gimple_call_arg (stmt, 0));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[2];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], cond, mode);
  gcc_assert (targetm.have_omp_simt_vote_any ());
  expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
}

/* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
   is destination lane index XOR given offset.  */

static void
expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_bfly ());
  expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
}
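
/* As an illustration: with offset 1, lanes 0 and 1 exchange values,
   lanes 2 and 3 exchange values, and so on.  A warp-wide reduction
   over 32 lanes would apply this with offsets 16, 8, 4, 2 and 1 so
   that every lane ends up holding the combined value (the offsets
   themselves come from the callers of this internal function).  */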

/* Exchange between SIMT lanes according to given source lane index.  */

static void
expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return;

  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  rtx src = expand_normal (gimple_call_arg (stmt, 0));
  rtx idx = expand_normal (gimple_call_arg (stmt, 1));
  machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
  struct expand_operand ops[3];
  create_output_operand (&ops[0], target, mode);
  create_input_operand (&ops[1], src, mode);
  create_input_operand (&ops[2], idx, SImode);
  gcc_assert (targetm.have_omp_simt_xchg_idx ());
  expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_MARK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON_USE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the lower pass.  */

static void
expand_FALLTHROUGH (internal_fn, gcall *call)
{
  error_at (gimple_location (call),
	    "invalid use of attribute %<fallthrough%>");
}

/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
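
/* A worked instance (illustrative): for the INTEGER_CST 255 in a
   32-bit unsigned type, get_min_precision (arg, UNSIGNED) returns 8,
   while get_min_precision (arg, SIGNED) returns 9, because holding
   255 as a signed value needs an extra sign bit.  On the non-constant
   paths the + (orig_sign != sign) terms account for the same extra
   bit whenever the requested signedness had to be flipped while
   walking through conversions.  */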

/* Helper for expand_*_overflow.  Set the __imag__ part to true
   (1 except for signed:1 type, in which case store -1).  */

static void
expand_arith_set_overflow (tree lhs, rtx target)
{
  if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
      && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
    write_complex_part (target, constm1_rtx, true);
  else
    write_complex_part (target, const1_rtx, true);
}

/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
   if LHS has smaller precision than its mode.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, true, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
  int tgtprec = GET_MODE_PRECISION (tgtmode);
  if (prec < tgtprec)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      res = lres;
      if (uns)
	{
	  rtx mask
	    = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
				    tgtmode);
	  lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
				      true, OPTAB_LIB_WIDEN);
	}
      else
	{
	  lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
			       NULL_RTX, 1);
	  lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
			       NULL_RTX, 0);
	}
      do_compare_rtx_and_jump (res, lres,
			       EQ, true, tgtmode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}
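
/* Sketch of the narrowing check above with illustrative numbers: if
   LHS is a 4-bit unsigned bit-field stored in an 8-bit mode, the mask
   is 0x0f and LRES = RES & 0x0f, so RES != LRES exactly when RES does
   not fit in 4 bits.  In the signed case the shift up by
   TGTPREC - PREC and arithmetic shift back sign-extend the low PREC
   bits, and the comparison fails exactly when RES is outside the
   bit-field's range.  */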

/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}

/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is + or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
			tree arg0, tree arg1, bool unsr_p, bool uns0_p,
			bool uns1_p, bool is_ubsan, tree *datap)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

653      here (GET_MODE_BITSIZE (mode)), S stands for signed type
654      with that precision, U for unsigned type with that precision,
655      sgn for unsigned most significant bit in that precision.
656      s1 is signed first operand, u1 is unsigned first operand,
657      s2 is signed second operand, u2 is unsigned second operand,
658      sr is signed result, ur is unsigned result and the following
659      rules say how to compute result (which is always result of
660      the operands as if both were unsigned, cast to the right
661      signedness) and how to compute whether operation overflowed.
662 
663      s1 + s2 -> sr
664 	res = (S) ((U) s1 + (U) s2)
665 	ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
666      s1 - s2 -> sr
667 	res = (S) ((U) s1 - (U) s2)
668 	ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
669      u1 + u2 -> ur
670 	res = u1 + u2
671 	ovf = res < u1 (or jump on carry, but RTL opts will handle it)
672      u1 - u2 -> ur
673 	res = u1 - u2
674 	ovf = res > u1 (or jump on carry, but RTL opts will handle it)
675      s1 + u2 -> sr
676 	res = (S) ((U) s1 + u2)
677 	ovf = ((U) res ^ sgn) < u2
678      s1 + u2 -> ur
679 	t1 = (S) (u2 ^ sgn)
680 	t2 = s1 + t1
681 	res = (U) t2 ^ sgn
682 	ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
683      s1 - u2 -> sr
684 	res = (S) ((U) s1 - u2)
685 	ovf = u2 > ((U) s1 ^ sgn)
686      s1 - u2 -> ur
687 	res = (U) s1 - u2
688 	ovf = s1 < 0 || u2 > (U) s1
689      u1 - s2 -> sr
690 	res = u1 - (U) s2
691  	ovf = u1 >= ((U) s2 ^ sgn)
692      u1 - s2 -> ur
693 	t1 = u1 ^ sgn
694 	t2 = t1 - (U) s2
695 	res = t2 ^ sgn
696 	ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
697      s1 + s2 -> ur
698 	res = (U) s1 + (U) s2
699 	ovf = s2 < 0 ? (s1 | (S) res) < 0) : (s1 & (S) res) < 0)
700      u1 + u2 -> sr
701 	res = (S) (u1 + u2)
702 	ovf = (U) res < u2 || res < 0
703      u1 - u2 -> sr
704 	res = (S) (u1 - u2)
705 	ovf = u1 >= u2 ? res < 0 : res >= 0
706      s1 - s2 -> ur
707 	res = (U) s1 - (U) s2
708 	ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
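
  /* A small worked instance of the u1 + u2 -> ur rule above
     (illustrative, 8-bit precision): 200 + 100 wraps to 44, and
     res (44) < u1 (200) flags the overflow; 100 + 100 = 200 stays
     >= u1, so no overflow is reported.  */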

  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative; if operand signedness differs,
	 canonicalize to the first operand being signed and the second
	 unsigned, to simplify the following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  /* u1 +- u2 -> ur  */
  if (uns0_p && uns1_p && unsr_p)
    {
      insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
                                       : usubv4_optab, mode);
      if (icode != CODE_FOR_nothing)
	{
	  struct expand_operand ops[4];
	  rtx_insn *last = get_last_insn ();

	  res = gen_reg_rtx (mode);
	  create_output_operand (&ops[0], res, mode);
	  create_input_operand (&ops[1], op0, mode);
	  create_input_operand (&ops[2], op1, mode);
	  create_fixed_operand (&ops[3], do_error);
	  if (maybe_expand_insn (icode, 4, ops))
	    {
	      last = get_last_insn ();
	      if (profile_status_for_fn (cfun) != PROFILE_ABSENT
		  && JUMP_P (last)
		  && any_condjump_p (last)
		  && !find_reg_note (last, REG_BR_PROB, 0))
		add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	      emit_jump (done_label);
	      goto do_error_label;
	    }

	  delete_insns_since (last);
	}

      /* Compute the operation.  On RTL level, the arithmetic is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick which
	 operand to compare against.  For prec <= BITS_PER_WORD, preferring
	 a REG operand over a CONST_INT is likely better, because the
	 CONST_INT might enlarge the instruction, or CSE would need to
	 figure out that we'd already loaded it into a register.  For
	 prec > BITS_PER_WORD, a CONST_INT might be more beneficial, as the
	 multi-word comparison can then perhaps be simplified.  */
      if (code == PLUS_EXPR
	  && (prec <= BITS_PER_WORD
	      ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
	      : CONST_SCALAR_INT_P (op1)))
	tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
			       true, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the arithmetic is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
			      code == PLUS_EXPR ? res : op0, sgn,
			      NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur  */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
	 for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
	 for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur  */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
	/* If ARG0 is known to be always negative, this is always overflow.  */
	emit_jump (do_error);
      else if (pos_neg == 3)
	/* If ARG0 is not known to be always positive, check at runtime.  */
	do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
				 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr  */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
	 unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick which operand to
	 compare against.  For prec <= BITS_PER_WORD, preferring a REG
	 operand over a CONST_INT is likely better, because the CONST_INT
	 might enlarge the instruction, or CSE would need to figure out
	 that we'd already loaded it into a register.  For
	 prec > BITS_PER_WORD, a CONST_INT might be more beneficial, as
	 the multi-word comparison can then perhaps be simplified.  */
      if (prec <= BITS_PER_WORD
	  ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
	  : CONST_SCALAR_INT_P (op0))
	tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the arithmetic is always
	 unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			  op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
	{
	  int pos_neg0 = get_range_pos_neg (arg0);
	  if (pos_neg0 != 3 && pos_neg == 3)
	    {
	      std::swap (op0, op1);
	      pos_neg = pos_neg0;
	    }
	}
      rtx tem;
      if (pos_neg != 3)
	{
	  tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
				    ? and_optab : ior_optab,
			      op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
				   NULL, done_label, PROB_VERY_LIKELY);
	}
      else
	{
	  rtx_code_label *do_ior_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op1, const0_rtx,
				   code == MINUS_EXPR ? GE : LT, false, mode,
				   NULL_RTX, NULL, do_ior_label,
				   PROB_EVEN);
	  tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  emit_jump (do_error);
	  emit_label (do_ior_label);
	  tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	}
      goto do_error_label;
    }

  /* u1 - u2 -> sr  */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the subtraction is always
	 unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
			  OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
			       op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
			       NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
			       NULL, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr  */
 do_signed:
  {
    insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
				     : subv4_optab, mode);
    if (icode != CODE_FOR_nothing)
      {
	struct expand_operand ops[4];
	rtx_insn *last = get_last_insn ();

	res = gen_reg_rtx (mode);
	create_output_operand (&ops[0], res, mode);
	create_input_operand (&ops[1], op0, mode);
	create_input_operand (&ops[2], op1, mode);
	create_fixed_operand (&ops[3], do_error);
	if (maybe_expand_insn (icode, 4, ops))
	  {
	    last = get_last_insn ();
	    if (profile_status_for_fn (cfun) != PROFILE_ABSENT
		&& JUMP_P (last)
		&& any_condjump_p (last)
		&& !find_reg_note (last, REG_BR_PROB, 0))
	      add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	    emit_jump (done_label);
	    goto do_error_label;
	  }

	delete_insns_since (last);
      }

    /* Compute the operation.  On RTL level, the arithmetic is always
       unsigned.  */
    res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
			op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

    /* If we can prove that one of the arguments (for MINUS_EXPR only
       the second operand, as subtraction is not commutative) is always
       non-negative or always negative, we can do just one comparison
       and conditional jump.  */
    int pos_neg = get_range_pos_neg (arg1);
    if (code == PLUS_EXPR)
      {
	int pos_neg0 = get_range_pos_neg (arg0);
	if (pos_neg0 != 3 && pos_neg == 3)
	  {
	    std::swap (op0, op1);
	    pos_neg = pos_neg0;
	  }
      }

    /* Addition overflows if and only if the two operands have the same sign,
       and the result has the opposite sign.  Subtraction overflows if and
       only if the two operands have opposite sign, and the subtrahend has
       the same sign as the result.  Here 0 is counted as positive.  */
    if (pos_neg == 3)
      {
	/* Compute op0 ^ op1 (operands have opposite sign).  */
	rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
				   OPTAB_LIB_WIDEN);

	/* Compute res ^ op1 (result and 2nd operand have opposite sign).  */
	rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
				    OPTAB_LIB_WIDEN);

	rtx tem;
	if (code == PLUS_EXPR)
	  {
	    /* Compute (res ^ op1) & ~(op0 ^ op1).  */
	    tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
	    tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
				OPTAB_LIB_WIDEN);
	  }
	else
	  {
	    /* Compute (op0 ^ op1) & ~(res ^ op1).  */
	    tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
	    tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
				OPTAB_LIB_WIDEN);
	  }

	/* No overflow if the result has the sign bit cleared.  */
	do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				 NULL, done_label, PROB_VERY_LIKELY);
      }

    /* Compare the result of the operation with the first operand.
       No overflow for addition if second operand is positive and result
       is larger or second operand is negative and result is smaller.
       Likewise for subtraction with sign of second operand flipped.  */
    else
      do_compare_rtx_and_jump (res, op0,
			       (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
			       false, mode, NULL_RTX, NULL, done_label,
			       PROB_VERY_LIKELY);
  }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
					 arg0, arg1, datap);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	{
	  if (do_xor)
	    res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
				OPTAB_LIB_WIDEN);

	  expand_arith_overflow_result_store (lhs, target, mode, res);
	}
    }
}

/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
		     tree *datap)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the negation is always
	 unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
			       done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE, datap);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
		     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
		     tree *datap)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
	res = (S) ((U) s1 * (U) s2)
	ovf = main_ovf (false)
     u1 * u2 -> ur
	res = u1 * u2
	ovf = main_ovf (true)
     s1 * u2 -> ur
	res = (U) s1 * u2
	ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
	res = (S) (u1 * u2)
	ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
	res = (S) ((U) s1 * u2)
	ovf = (S) u2 >= 0 ? main_ovf (false)
			  : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
	t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
	t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
	res = t1 * t2
	ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
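
  /* A small worked instance of the s1 * s2 -> sr rule above
     (illustrative, 8-bit precision): s1 = -100, s2 = 2.  As unsigned,
     156 * 2 = 312 wraps to 56, so res = (S) 56 = 56, while the true
     product -200 is not representable; main_ovf (false) is what
     catches this.  */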

  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative; if operand signedness differs,
	 canonicalize to the first operand being signed and the second
	 unsigned, to simplify the following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur  */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
	{
	case 1:
	  /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
	  goto do_main;
	case 2:
	  /* If s1 is negative, avoid the main code, just multiply and
	     signal overflow if op1 is not 0.  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  rtx_code_label *do_main_label;
	  do_main_label = gen_label_rtx ();
	  do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  expand_arith_set_overflow (lhs, target);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* u1 * u2 -> sr  */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr  */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
	{
	case 1:
	  goto do_main;
	case 2:
	  /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
	     avoid the main code; just multiply and signal overflow
	     unless 0 * u2 or -1 * ((U) Smin).  */
	  struct separate_ops ops;
	  ops.code = MULT_EXPR;
	  ops.type = TREE_TYPE (arg1);
	  ops.op0 = make_tree (ops.type, op0);
	  ops.op1 = make_tree (ops.type, op1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
				   NULL, do_error, PROB_VERY_UNLIKELY);
	  int prec;
	  prec = GET_MODE_PRECISION (mode);
	  rtx sgn;
	  sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
	  do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
				   NULL, done_label, PROB_VERY_LIKELY);
	  goto do_error_label;
	case 3:
	  /* Rest of handling of this case after res is computed.  */
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

  /* s1 * s2 -> ur  */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
	{
	case 1: /* Both operands known to be non-negative.  */
	  goto do_main;
	case 2: /* Both operands known to be negative.  */
	  op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
	  op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
	  /* Avoid looking at arg0/arg1 ranges, as we've changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  goto do_main;
	case 3:
	  if ((pos_neg0 ^ pos_neg1) == 3)
	    {
	      /* If one operand is known to be negative and the other
		 non-negative, this always overflows, unless the non-negative
		 one is 0.  Just do normal multiply and set overflow
		 unless one of the operands is 0.  */
	      struct separate_ops ops;
	      ops.code = MULT_EXPR;
	      ops.type
		= build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
						  1);
	      ops.op0 = make_tree (ops.type, op0);
	      ops.op1 = make_tree (ops.type, op1);
	      ops.op2 = NULL_TREE;
	      ops.location = loc;
	      res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	      tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
				  OPTAB_LIB_WIDEN);
	      do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       PROB_VERY_LIKELY);
	      goto do_error_label;
	    }
	  /* The general case, do all the needed comparisons at runtime.  */
	  rtx_code_label *do_main_label, *after_negate_label;
	  rtx rop0, rop1;
	  rop0 = gen_reg_rtx (mode);
	  rop1 = gen_reg_rtx (mode);
	  emit_move_insn (rop0, op0);
	  emit_move_insn (rop1, op1);
	  op0 = rop0;
	  op1 = rop1;
	  do_main_label = gen_label_rtx ();
	  after_negate_label = gen_label_rtx ();
	  tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
			      OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, after_negate_label, PROB_VERY_LIKELY);
	  /* Both arguments negative here, negate them and continue with
	     normal unsigned overflow checking multiplication.  */
	  emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
					    NULL_RTX, false));
	  emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
					    NULL_RTX, false));
	  /* Avoid looking at arg0/arg1 ranges, as we might have changed
	     the arguments.  */
	  arg0 = error_mark_node;
	  arg1 = error_mark_node;
	  emit_jump (do_main_label);
	  emit_label (after_negate_label);
	  tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
			       OPTAB_LIB_WIDEN);
	  do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  /* One argument is negative here, the other positive.  This
	     always overflows, unless one of the arguments is 0.  But
	     if e.g. s2 is 0, (U) s1 * 0 doesn't overflow whatever s1
	     is, so we can keep the do_main code, which ORs in the
	     overflow, as is.  */
	  do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
				   NULL, do_main_label, PROB_VERY_LIKELY);
	  expand_arith_set_overflow (lhs, target);
	  emit_label (do_main_label);
	  goto do_main;
	default:
	  gcc_unreachable ();
	}
    }

 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
	{
	  last = get_last_insn ();
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
	  emit_jump (done_label);
        }
      else
	{
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
	  && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
	{
	  machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
	  ops.code = WIDEN_MULT_EXPR;
	  ops.type
	    = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

	  res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
	  rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
				     NULL_RTX, uns);
	  hipart = convert_modes (mode, wmode, hipart, uns);
	  res = convert_modes (mode, wmode, res, uns);
	  if (uns)
	    /* For the unsigned multiplication, there was overflow if
	       HIPART is non-zero.  */
	    do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
				     NULL_RTX, NULL, done_label,
				     PROB_VERY_LIKELY);
	  else
	    {
	      rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
					  NULL_RTX, 0);
	      /* RES is low half of the double width result, HIPART
		 the high half.  There was overflow if
		 HIPART is different from RES < 0 ? -1 : 0.  */
	      do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
				       NULL_RTX, NULL, done_label,
				       PROB_VERY_LIKELY);
	    }
	}
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
	{
	  rtx_code_label *large_op0 = gen_label_rtx ();
	  rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
	  rtx_code_label *one_small_one_large = gen_label_rtx ();
	  rtx_code_label *both_ops_large = gen_label_rtx ();
	  rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
	  rtx_code_label *do_overflow = gen_label_rtx ();
	  rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

	  unsigned int hprec = GET_MODE_PRECISION (hmode);
	  rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
				      NULL_RTX, uns);
	  hipart0 = convert_modes (hmode, mode, hipart0, uns);
	  rtx lopart0 = convert_modes (hmode, mode, op0, uns);
	  rtx signbit0 = const0_rtx;
	  if (!uns)
	    signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
				     NULL_RTX, 0);
	  rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
				      NULL_RTX, uns);
	  hipart1 = convert_modes (hmode, mode, hipart1, uns);
	  rtx lopart1 = convert_modes (hmode, mode, op1, uns);
	  rtx signbit1 = const0_rtx;
	  if (!uns)
	    signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
				     NULL_RTX, 0);

	  res = gen_reg_rtx (mode);

	  /* True if op0 resp. op1 are known to be in the range of
	     halfstype.  */
	  bool op0_small_p = false;
	  bool op1_small_p = false;
	  /* True if op0 resp. op1 are known to have all zeros or all ones
	     in the upper half of bits, but are not known to be
	     op{0,1}_small_p.  */
	  bool op0_medium_p = false;
	  bool op1_medium_p = false;
	  /* -1 if op{0,1} is known to be negative, 0 if it is known to be
	     nonnegative, 1 if unknown.  */
	  int op0_sign = 1;
	  int op1_sign = 1;

	  if (pos_neg0 == 1)
	    op0_sign = 0;
	  else if (pos_neg0 == 2)
	    op0_sign = -1;
	  if (pos_neg1 == 1)
	    op1_sign = 0;
	  else if (pos_neg1 == 2)
	    op1_sign = -1;

	  unsigned int mprec0 = prec;
	  if (arg0 != error_mark_node)
	    mprec0 = get_min_precision (arg0, sign);
	  if (mprec0 <= hprec)
	    op0_small_p = true;
	  else if (!uns && mprec0 <= hprec + 1)
	    op0_medium_p = true;
	  unsigned int mprec1 = prec;
	  if (arg1 != error_mark_node)
	    mprec1 = get_min_precision (arg1, sign);
	  if (mprec1 <= hprec)
	    op1_small_p = true;
	  else if (!uns && mprec1 <= hprec + 1)
	    op1_medium_p = true;

	  int smaller_sign = 1;
	  int larger_sign = 1;
	  if (op0_small_p)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op1_sign;
	    }
	  else if (op1_small_p)
	    {
	      smaller_sign = op1_sign;
	      larger_sign = op0_sign;
	    }
	  else if (op0_sign == op1_sign)
	    {
	      smaller_sign = op0_sign;
	      larger_sign = op0_sign;
	    }

	  if (!op0_small_p)
	    do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
				     NULL_RTX, NULL, large_op0,
				     PROB_UNLIKELY);

	  if (!op1_small_p)
	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
				     NULL_RTX, NULL, small_op0_large_op1,
				     PROB_UNLIKELY);

	  /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
	     hmode to mode, the multiplication will never overflow.  We can
	     do just one hmode x hmode => mode widening multiplication.  */
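	  /* (Illustrative bound: with mode == SImode and hmode == HImode,
	     sign-extended inputs satisfy |op0|, |op1| <= 2^15, so
	     |op0 * op1| <= 2^30, and zero-extended inputs satisfy
	     op0, op1 < 2^16, so op0 * op1 < 2^32; either way the product
	     fits the full-width result.)  */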
1583 	  rtx lopart0s = lopart0, lopart1s = lopart1;
1584 	  if (GET_CODE (lopart0) == SUBREG)
1585 	    {
1586 	      lopart0s = shallow_copy_rtx (lopart0);
1587 	      SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
1588 	      SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1589 	    }
1590 	  if (GET_CODE (lopart1) == SUBREG)
1591 	    {
1592 	      lopart1s = shallow_copy_rtx (lopart1);
1593 	      SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
1594 	      SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1595 	    }
1596 	  tree halfstype = build_nonstandard_integer_type (hprec, uns);
1597 	  ops.op0 = make_tree (halfstype, lopart0s);
1598 	  ops.op1 = make_tree (halfstype, lopart1s);
1599 	  ops.code = WIDEN_MULT_EXPR;
1600 	  ops.type = type;
1601 	  rtx thisres
1602 	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1603 	  emit_move_insn (res, thisres);
1604 	  emit_jump (done_label);
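
	  /* A minimal illustration (assuming a 64-bit mode, so hprec is
	     32): if both operands are known to fit in 32 bits, then

	       res = (int64_t) (int32_t) op0 * (int32_t) op1;

	     is exact; the magnitude of the product is at most 2^62, so
	     this single widening multiplication can never overflow.  */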
1605 
1606 	  emit_label (small_op0_large_op1);
1607 
1608 	  /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1609 	     but op1 is not, just swap the arguments and handle it as op1
1610 	     sign/zero extended, op0 not.  */
1611 	  rtx larger = gen_reg_rtx (mode);
1612 	  rtx hipart = gen_reg_rtx (hmode);
1613 	  rtx lopart = gen_reg_rtx (hmode);
1614 	  emit_move_insn (larger, op1);
1615 	  emit_move_insn (hipart, hipart1);
1616 	  emit_move_insn (lopart, lopart0);
1617 	  emit_jump (one_small_one_large);
1618 
1619 	  emit_label (large_op0);
1620 
1621 	  if (!op1_small_p)
1622 	    do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1623 				     NULL_RTX, NULL, both_ops_large,
1624 				     PROB_UNLIKELY);
1625 
1626 	  /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
1627 	     but op0 is not, prepare larger, hipart and lopart pseudos and
1628 	     handle it together with small_op0_large_op1.  */
1629 	  emit_move_insn (larger, op0);
1630 	  emit_move_insn (hipart, hipart0);
1631 	  emit_move_insn (lopart, lopart1);
1632 
1633 	  emit_label (one_small_one_large);
1634 
1635 	  /* lopart is the low part of the operand that is sign extended
1636 	     to mode, larger is the other operand, hipart is the
1637 	     high part of larger and lopart0 and lopart1 are the low parts
1638 	     of both operands.
1639 	     We perform lopart0 * lopart1 and lopart * hipart widening
1640 	     multiplications.  */
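	  /* A sketch of the algebra, writing h for hprec and "small"
	     for the sign- or zero-extended operand:

	       larger = hipart * 2^h + lo (larger)
	       small * larger = (small * hipart) << h + small * lo (larger)

	     lo0xlo1 below computes the low term as an unsigned
	     h x h -> 2h widening multiply, and loxhi accumulates
	     small * hipart; the conditional subtractions that follow
	     correct for having treated negative halves as unsigned.  */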
1641 	  tree halfutype = build_nonstandard_integer_type (hprec, 1);
1642 	  ops.op0 = make_tree (halfutype, lopart0);
1643 	  ops.op1 = make_tree (halfutype, lopart1);
1644 	  rtx lo0xlo1
1645 	    = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1646 
1647 	  ops.op0 = make_tree (halfutype, lopart);
1648 	  ops.op1 = make_tree (halfutype, hipart);
1649 	  rtx loxhi = gen_reg_rtx (mode);
1650 	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1651 	  emit_move_insn (loxhi, tem);
1652 
1653 	  if (!uns)
1654 	    {
1655 	      /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
1656 	      if (larger_sign == 0)
1657 		emit_jump (after_hipart_neg);
1658 	      else if (larger_sign != -1)
1659 		do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
1660 					 NULL_RTX, NULL, after_hipart_neg,
1661 					 PROB_EVEN);
1662 
1663 	      tem = convert_modes (mode, hmode, lopart, 1);
1664 	      tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
1665 	      tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
1666 					 1, OPTAB_DIRECT);
1667 	      emit_move_insn (loxhi, tem);
1668 
1669 	      emit_label (after_hipart_neg);
1670 
1671 	      /* if (lopart < 0) loxhi -= larger;  */
1672 	      if (smaller_sign == 0)
1673 		emit_jump (after_lopart_neg);
1674 	      else if (smaller_sign != -1)
1675 		do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
1676 					 NULL_RTX, NULL, after_lopart_neg,
1677 					 PROB_EVEN);
1678 
1679 	      tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
1680 					 1, OPTAB_DIRECT);
1681 	      emit_move_insn (loxhi, tem);
1682 
1683 	      emit_label (after_lopart_neg);
1684 	    }
1685 
1686 	  /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
1687 	  tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
1688 	  tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
1689 				     1, OPTAB_DIRECT);
1690 	  emit_move_insn (loxhi, tem);
1691 
1692 	  /* if (loxhi >> (bitsize / 2)
1693 		 == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
1694 	     if (loxhi >> (bitsize / 2) == 0)		 (if uns).  */
1695 	  rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
1696 					  NULL_RTX, 0);
1697 	  hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
1698 	  rtx signbitloxhi = const0_rtx;
1699 	  if (!uns)
1700 	    signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
1701 					 convert_modes (hmode, mode,
1702 							loxhi, 0),
1703 					 hprec - 1, NULL_RTX, 0);
1704 
1705 	  do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
1706 				   NULL_RTX, NULL, do_overflow,
1707 				   PROB_VERY_UNLIKELY);
1708 
1709 	  /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
1710 	  rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
1711 					   NULL_RTX, 1);
1712 	  tem = convert_modes (mode, hmode,
1713 			       convert_modes (hmode, mode, lo0xlo1, 1), 1);
1714 
1715 	  tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
1716 				     1, OPTAB_DIRECT);
1717 	  if (tem != res)
1718 	    emit_move_insn (res, tem);
1719 	  emit_jump (done_label);
1720 
1721 	  emit_label (both_ops_large);
1722 
1723 	  /* If both operands are large (not sign (!uns) or zero (uns)
1724 	     extended from hmode), then perform the full multiplication
1725 	     which will be the result of the operation.
1726 	     For signed multiplication the only cases which don't
1727 	     overflow are some where both hipart0 and hipart1 are 0
1728 	     or -1.  Unsigned multiplication always overflows when
1729 	     both high parts are non-zero.  */
1730 	  ops.code = MULT_EXPR;
1731 	  ops.op0 = make_tree (type, op0);
1732 	  ops.op1 = make_tree (type, op1);
1733 	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1734 	  emit_move_insn (res, tem);
1735 
1736 	  if (!uns)
1737 	    {
1738 	      if (!op0_medium_p)
1739 		{
1740 		  tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
1741 					     NULL_RTX, 1, OPTAB_DIRECT);
1742 		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1743 					   NULL_RTX, NULL, do_error,
1744 					   PROB_VERY_UNLIKELY);
1745 		}
1746 
1747 	      if (!op1_medium_p)
1748 		{
1749 		  tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
1750 					     NULL_RTX, 1, OPTAB_DIRECT);
1751 		  do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1752 					   NULL_RTX, NULL, do_error,
1753 					   PROB_VERY_UNLIKELY);
1754 		}
1755 
1756 	      /* At this point hipart{0,1} are both in [-1, 0].  If they are
1757 		 the same, overflow happened if res is non-positive, if they
1758 		 are different, overflow happened if res is positive.  */
1759 	      if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
1760 		emit_jump (hipart_different);
1761 	      else if (op0_sign == 1 || op1_sign == 1)
1762 		do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
1763 					 NULL_RTX, NULL, hipart_different,
1764 					 PROB_EVEN);
1765 
1766 	      do_compare_rtx_and_jump (res, const0_rtx, LE, false, mode,
1767 				       NULL_RTX, NULL, do_error,
1768 				       PROB_VERY_UNLIKELY);
1769 	      emit_jump (done_label);
1770 
1771 	      emit_label (hipart_different);
1772 
1773 	      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
1774 				       NULL_RTX, NULL, do_error,
1775 				       PROB_VERY_UNLIKELY);
1776 	      emit_jump (done_label);
1777 	    }
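
	  /* A worked instance of the hipart_different path (32-bit
	     mode, hprec 16): op0 == -65536 (hipart0 == -1) and
	     op1 == 32768 (hipart1 == 0) give res == INT_MIN, which is
	     negative and representable, so no overflow; op0 == -65536
	     and op1 == 65535 wrap to res == 65536 > 0, and the GE
	     comparison above detects the overflow.  */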
1778 
1779 	  emit_label (do_overflow);
1780 
1781 	  /* Overflow, do full multiplication and fallthru into do_error.  */
1782 	  ops.op0 = make_tree (type, op0);
1783 	  ops.op1 = make_tree (type, op1);
1784 	  tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1785 	  emit_move_insn (res, tem);
1786 	}
1787       else
1788 	{
1789 	  gcc_assert (!is_ubsan);
1790 	  ops.code = MULT_EXPR;
1791 	  ops.type = type;
1792 	  res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1793 	  emit_jump (done_label);
1794 	}
1795     }
1796 
1797  do_error_label:
1798   emit_label (do_error);
1799   if (is_ubsan)
1800     {
1801       /* Expand the ubsan builtin call.  */
1802       push_temp_slots ();
1803       fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
1804 					 arg0, arg1, datap);
1805       expand_normal (fn);
1806       pop_temp_slots ();
1807       do_pending_stack_adjust ();
1808     }
1809   else if (lhs)
1810     expand_arith_set_overflow (lhs, target);
1811 
1812   /* We're done.  */
1813   emit_label (done_label);
1814 
1815   /* u1 * u2 -> sr  */
1816   if (uns0_p && uns1_p && !unsr_p)
1817     {
1818       rtx_code_label *all_done_label = gen_label_rtx ();
1819       do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1820 			       NULL, all_done_label, PROB_VERY_LIKELY);
1821       expand_arith_set_overflow (lhs, target);
1822       emit_label (all_done_label);
1823     }
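
  /* E.g. (a sketch, 32-bit mode): 0x10000u * 0x8000u == 0x80000000
     fits the unsigned product but is negative when reinterpreted as
     signed, so the res < 0 fallthrough above records the overflow.  */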
1824 
1825   /* s1 * u2 -> sr  */
1826   if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
1827     {
1828       rtx_code_label *all_done_label = gen_label_rtx ();
1829       rtx_code_label *set_noovf = gen_label_rtx ();
1830       do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
1831 			       NULL, all_done_label, PROB_VERY_LIKELY);
1832       expand_arith_set_overflow (lhs, target);
1833       do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1834 			       NULL, set_noovf, PROB_VERY_LIKELY);
1835       do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1836 			       NULL, all_done_label, PROB_VERY_UNLIKELY);
1837       do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
1838 			       all_done_label, PROB_VERY_UNLIKELY);
1839       emit_label (set_noovf);
1840       write_complex_part (target, const0_rtx, true);
1841       emit_label (all_done_label);
1842     }
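
  /* A sketch of the cases above (32-bit mode): once op1 >= 0x80000000,
     the signed result overflows unless op0 is 0, or op0 is -1 and op1
     is exactly 0x80000000; in the latter case res == 0x80000000 ==
     INT_MIN is representable, and the op1 != res comparison is what
     singles that value out.  */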
1843 
1844   if (lhs)
1845     {
1846       if (is_ubsan)
1847 	expand_ubsan_result_store (target, res);
1848       else
1849 	expand_arith_overflow_result_store (lhs, target, mode, res);
1850     }
1851 }
1852 
1853 /* Expand UBSAN_CHECK_* internal function if it has vector operands.  */
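
/* A sketch of the two strategies used below: for small vectors
   (TYPE_VECTOR_SUBPARTS <= 4) each element is checked by unrolled
   straight-line code, conceptually

     res[i] = checked_op (arg0[i], arg1[i]);   for i = 0 .. cnt-1

   while longer vectors are handled by a single counted loop indexing
   the operands through an array view of the vectors; checked_op here
   stands for the scalar expand_{addsub,neg,mul}_overflow helpers.  */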
1854 
1855 static void
1856 expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
1857 			      tree arg0, tree arg1)
1858 {
1859   int cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
1860   rtx_code_label *loop_lab = NULL;
1861   rtx cntvar = NULL_RTX;
1862   tree cntv = NULL_TREE;
1863   tree eltype = TREE_TYPE (TREE_TYPE (arg0));
1864   tree sz = TYPE_SIZE (eltype);
1865   tree data = NULL_TREE;
1866   tree resv = NULL_TREE;
1867   rtx lhsr = NULL_RTX;
1868   rtx resvr = NULL_RTX;
1869 
1870   if (lhs)
1871     {
1872       optab op;
1873       lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1874       if (!VECTOR_MODE_P (GET_MODE (lhsr))
1875 	  || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
1876 					optab_default)) == unknown_optab
1877 	  || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
1878 	      == CODE_FOR_nothing))
1879 	{
1880 	  if (MEM_P (lhsr))
1881 	    resv = make_tree (TREE_TYPE (lhs), lhsr);
1882 	  else
1883 	    {
1884 	      resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
1885 	      resv = make_tree (TREE_TYPE (lhs), resvr);
1886 	    }
1887 	}
1888     }
1889   if (cnt > 4)
1890     {
1891       do_pending_stack_adjust ();
1892       loop_lab = gen_label_rtx ();
1893       cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
1894       cntv = make_tree (sizetype, cntvar);
1895       emit_move_insn (cntvar, const0_rtx);
1896       emit_label (loop_lab);
1897     }
1898   if (TREE_CODE (arg0) != VECTOR_CST)
1899     {
1900       rtx arg0r = expand_normal (arg0);
1901       arg0 = make_tree (TREE_TYPE (arg0), arg0r);
1902     }
1903   if (TREE_CODE (arg1) != VECTOR_CST)
1904     {
1905       rtx arg1r = expand_normal (arg1);
1906       arg1 = make_tree (TREE_TYPE (arg1), arg1r);
1907     }
1908   for (int i = 0; i < (cnt > 4 ? 1 : cnt); i++)
1909     {
1910       tree op0, op1, res = NULL_TREE;
1911       if (cnt > 4)
1912 	{
1913 	  tree atype = build_array_type_nelts (eltype, cnt);
1914 	  op0 = uniform_vector_p (arg0);
1915 	  if (op0 == NULL_TREE)
1916 	    {
1917 	      op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
1918 	      op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
1919 				NULL_TREE, NULL_TREE);
1920 	    }
1921 	  op1 = uniform_vector_p (arg1);
1922 	  if (op1 == NULL_TREE)
1923 	    {
1924 	      op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
1925 	      op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
1926 				NULL_TREE, NULL_TREE);
1927 	    }
1928 	  if (resv)
1929 	    {
1930 	      res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
1931 	      res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
1932 				NULL_TREE, NULL_TREE);
1933 	    }
1934 	}
1935       else
1936 	{
1937 	  tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
1938 	  op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
1939 	  op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
1940 	  if (resv)
1941 	    res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
1942 				   bitpos);
1943 	}
1944       switch (code)
1945 	{
1946 	case PLUS_EXPR:
1947 	  expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
1948 				  false, false, false, true, &data);
1949 	  break;
1950 	case MINUS_EXPR:
1951 	  if (cnt > 4 ? integer_zerop (arg0) : integer_zerop (op0))
1952 	    expand_neg_overflow (loc, res, op1, true, &data);
1953 	  else
1954 	    expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
1955 				    false, false, false, true, &data);
1956 	  break;
1957 	case MULT_EXPR:
1958 	  expand_mul_overflow (loc, res, op0, op1, false, false, false,
1959 			       true, &data);
1960 	  break;
1961 	default:
1962 	  gcc_unreachable ();
1963 	}
1964     }
1965   if (cnt > 4)
1966     {
1967       struct separate_ops ops;
1968       ops.code = PLUS_EXPR;
1969       ops.type = TREE_TYPE (cntv);
1970       ops.op0 = cntv;
1971       ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
1972       ops.op2 = NULL_TREE;
1973       ops.location = loc;
1974       rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
1975 				    EXPAND_NORMAL);
1976       if (ret != cntvar)
1977 	emit_move_insn (cntvar, ret);
1978       do_compare_rtx_and_jump (cntvar, GEN_INT (cnt), NE, false,
1979 			       TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
1980 			       PROB_VERY_LIKELY);
1981     }
1982   if (lhs && resv == NULL_TREE)
1983     {
1984       struct separate_ops ops;
1985       ops.code = code;
1986       ops.type = TREE_TYPE (arg0);
1987       ops.op0 = arg0;
1988       ops.op1 = arg1;
1989       ops.op2 = NULL_TREE;
1990       ops.location = loc;
1991       rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
1992 				    EXPAND_NORMAL);
1993       if (ret != lhsr)
1994 	emit_move_insn (lhsr, ret);
1995     }
1996   else if (resvr)
1997     emit_move_insn (lhsr, resvr);
1998 }
1999 
2000 /* Expand UBSAN_CHECK_ADD call STMT.  */
2001 
2002 static void
2003 expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
2004 {
2005   location_t loc = gimple_location (stmt);
2006   tree lhs = gimple_call_lhs (stmt);
2007   tree arg0 = gimple_call_arg (stmt, 0);
2008   tree arg1 = gimple_call_arg (stmt, 1);
2009   if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2010     expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
2011   else
2012     expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
2013 			    false, false, false, true, NULL);
2014 }
2015 
2016 /* Expand UBSAN_CHECK_SUB call STMT.  */
2017 
2018 static void
2019 expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
2020 {
2021   location_t loc = gimple_location (stmt);
2022   tree lhs = gimple_call_lhs (stmt);
2023   tree arg0 = gimple_call_arg (stmt, 0);
2024   tree arg1 = gimple_call_arg (stmt, 1);
2025   if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2026     expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
2027   else if (integer_zerop (arg0))
2028     expand_neg_overflow (loc, lhs, arg1, true, NULL);
2029   else
2030     expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
2031 			    false, false, false, true, NULL);
2032 }
2033 
2034 /* Expand UBSAN_CHECK_MUL call STMT.  */
2035 
2036 static void
2037 expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
2038 {
2039   location_t loc = gimple_location (stmt);
2040   tree lhs = gimple_call_lhs (stmt);
2041   tree arg0 = gimple_call_arg (stmt, 0);
2042   tree arg1 = gimple_call_arg (stmt, 1);
2043   if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2044     expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
2045   else
2046     expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
2047 			 NULL);
2048 }
2049 
2050 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.  */
2051 
2052 static void
2053 expand_arith_overflow (enum tree_code code, gimple *stmt)
2054 {
2055   tree lhs = gimple_call_lhs (stmt);
2056   if (lhs == NULL_TREE)
2057     return;
2058   tree arg0 = gimple_call_arg (stmt, 0);
2059   tree arg1 = gimple_call_arg (stmt, 1);
2060   tree type = TREE_TYPE (TREE_TYPE (lhs));
2061   int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
2062   int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
2063   int unsr_p = TYPE_UNSIGNED (type);
2064   int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
2065   int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
2066   int precres = TYPE_PRECISION (type);
2067   location_t loc = gimple_location (stmt);
2068   if (!uns0_p && get_range_pos_neg (arg0) == 1)
2069     uns0_p = true;
2070   if (!uns1_p && get_range_pos_neg (arg1) == 1)
2071     uns1_p = true;
2072   int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
2073   prec0 = MIN (prec0, pr);
2074   pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
2075   prec1 = MIN (prec1, pr);
2076 
2077   /* If uns0_p && uns1_p, precop is minimum needed precision
2078      of unsigned type to hold the exact result, otherwise
2079      precop is minimum needed precision of signed type to
2080      hold the exact result.  */
2081   int precop;
2082   if (code == MULT_EXPR)
2083     precop = prec0 + prec1 + (uns0_p != uns1_p);
2084   else
2085     {
2086       if (uns0_p == uns1_p)
2087 	precop = MAX (prec0, prec1) + 1;
2088       else if (uns0_p)
2089 	precop = MAX (prec0 + 1, prec1) + 1;
2090       else
2091 	precop = MAX (prec0, prec1 + 1) + 1;
2092     }
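
  /* For example (a sketch): multiplying two signed 32-bit values
     needs precop == 64 to hold any exact product, while adding them
     needs only precop == 33; with a signed 64-bit result type both
     fit, so the infinite-precision branch of the loop below expands
     the operation without any runtime check.  */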
2093   int orig_precres = precres;
2094 
2095   do
2096     {
2097       if ((uns0_p && uns1_p)
2098 	  ? ((precop + !unsr_p) <= precres
2099 	     /* u1 - u2 -> ur can overflow, no matter what precision
2100 		the result has.  */
2101 	     && (code != MINUS_EXPR || !unsr_p))
2102 	  : (!unsr_p && precop <= precres))
2103 	{
2104 	  /* The infinite-precision result will always fit into the result.  */
2105 	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2106 	  write_complex_part (target, const0_rtx, true);
2107 	  enum machine_mode mode = TYPE_MODE (type);
2108 	  struct separate_ops ops;
2109 	  ops.code = code;
2110 	  ops.type = type;
2111 	  ops.op0 = fold_convert_loc (loc, type, arg0);
2112 	  ops.op1 = fold_convert_loc (loc, type, arg1);
2113 	  ops.op2 = NULL_TREE;
2114 	  ops.location = loc;
2115 	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2116 	  expand_arith_overflow_result_store (lhs, target, mode, tem);
2117 	  return;
2118 	}
2119 
2120       /* For operations with low precision, if the target doesn't support
2121 	 them, start with precres widening right away; otherwise widen only
2122 	 if the simplest cases can't be used.  */
2123       const int min_precision = targetm.min_arithmetic_precision ();
2124       if (orig_precres == precres && precres < min_precision)
2125 	;
2126       else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
2127 		&& prec1 <= precres)
2128 	  || ((!uns0_p || !uns1_p) && !unsr_p
2129 	      && prec0 + uns0_p <= precres
2130 	      && prec1 + uns1_p <= precres))
2131 	{
2132 	  arg0 = fold_convert_loc (loc, type, arg0);
2133 	  arg1 = fold_convert_loc (loc, type, arg1);
2134 	  switch (code)
2135 	    {
2136 	    case MINUS_EXPR:
2137 	      if (integer_zerop (arg0) && !unsr_p)
2138 		{
2139 		  expand_neg_overflow (loc, lhs, arg1, false, NULL);
2140 		  return;
2141 		}
2142 	      /* FALLTHRU */
2143 	    case PLUS_EXPR:
2144 	      expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2145 				      unsr_p, unsr_p, false, NULL);
2146 	      return;
2147 	    case MULT_EXPR:
2148 	      expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2149 				   unsr_p, unsr_p, false, NULL);
2150 	      return;
2151 	    default:
2152 	      gcc_unreachable ();
2153 	    }
2154 	}
2155 
2156       /* For sub-word operations, retry with a wider type first.  */
2157       if (orig_precres == precres && precop <= BITS_PER_WORD)
2158 	{
2159 	  int p = MAX (min_precision, precop);
2160 	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
2161 	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2162 							uns0_p && uns1_p
2163 							&& unsr_p);
2164 	  p = TYPE_PRECISION (optype);
2165 	  if (p > precres)
2166 	    {
2167 	      precres = p;
2168 	      unsr_p = TYPE_UNSIGNED (optype);
2169 	      type = optype;
2170 	      continue;
2171 	    }
2172 	}
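
      /* E.g. (a sketch): signed char + unsigned char -> unsigned char
	 has precop == 10, so the retry above switches to a 16-bit (or
	 wider) signed type in which the sum is exact and can then be
	 range-checked once when stored into the result.  */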
2173 
2174       if (prec0 <= precres && prec1 <= precres)
2175 	{
2176 	  tree types[2];
2177 	  if (unsr_p)
2178 	    {
2179 	      types[0] = build_nonstandard_integer_type (precres, 0);
2180 	      types[1] = type;
2181 	    }
2182 	  else
2183 	    {
2184 	      types[0] = type;
2185 	      types[1] = build_nonstandard_integer_type (precres, 1);
2186 	    }
2187 	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
2188 	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
2189 	  if (code != MULT_EXPR)
2190 	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2191 				    uns0_p, uns1_p, false, NULL);
2192 	  else
2193 	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2194 				 uns0_p, uns1_p, false, NULL);
2195 	  return;
2196 	}
2197 
2198       /* Retry with a wider type.  */
2199       if (orig_precres == precres)
2200 	{
2201 	  int p = MAX (prec0, prec1);
2202 	  enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
2203 	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2204 							uns0_p && uns1_p
2205 							&& unsr_p);
2206 	  p = TYPE_PRECISION (optype);
2207 	  if (p > precres)
2208 	    {
2209 	      precres = p;
2210 	      unsr_p = TYPE_UNSIGNED (optype);
2211 	      type = optype;
2212 	      continue;
2213 	    }
2214 	}
2215 
2216       gcc_unreachable ();
2217     }
2218   while (1);
2219 }
2220 
2221 /* Expand ADD_OVERFLOW STMT.  */
2222 
2223 static void
2224 expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
2225 {
2226   expand_arith_overflow (PLUS_EXPR, stmt);
2227 }
2228 
2229 /* Expand SUB_OVERFLOW STMT.  */
2230 
2231 static void
2232 expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
2233 {
2234   expand_arith_overflow (MINUS_EXPR, stmt);
2235 }
2236 
2237 /* Expand MUL_OVERFLOW STMT.  */
2238 
2239 static void
2240 expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
2241 {
2242   expand_arith_overflow (MULT_EXPR, stmt);
2243 }
2244 
2245 /* This should get folded in tree-vectorizer.c.  */
2246 
2247 static void
2248 expand_LOOP_VECTORIZED (internal_fn, gcall *)
2249 {
2250   gcc_unreachable ();
2251 }
2252 
2253 /* Expand MASK_LOAD call STMT using optab OPTAB.  */
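
/* Semantically (a sketch), MASK_LOAD (ptr, align, mask) behaves like

     for (i = 0; i < nelts; i++)
       if (mask[i])
	 lhs[i] = ((elt_type *) ptr)[i];

   without faulting on masked-off elements; the align argument refines
   the alignment assumed for the access (nelts and elt_type are just
   names for exposition).  */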
2254 
2255 static void
2256 expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2257 {
2258   struct expand_operand ops[3];
2259   tree type, lhs, rhs, maskt, ptr;
2260   rtx mem, target, mask;
2261   unsigned align;
2262 
2263   maskt = gimple_call_arg (stmt, 2);
2264   lhs = gimple_call_lhs (stmt);
2265   if (lhs == NULL_TREE)
2266     return;
2267   type = TREE_TYPE (lhs);
2268   ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
2269   align = tree_to_shwi (gimple_call_arg (stmt, 1));
2270   if (TYPE_ALIGN (type) != align)
2271     type = build_aligned_type (type, align);
2272   rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);
2273 
2274   mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2275   gcc_assert (MEM_P (mem));
2276   mask = expand_normal (maskt);
2277   target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2278   create_output_operand (&ops[0], target, TYPE_MODE (type));
2279   create_fixed_operand (&ops[1], mem);
2280   create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2281   expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
2282 				      TYPE_MODE (TREE_TYPE (maskt))),
2283 	       3, ops);
2284 }
2285 
2286 /* Expand MASK_STORE call STMT using optab OPTAB.  */
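
/* The store is the mirror image of the masked load sketched above:
   elements whose mask bit is set are written to memory, and the rest
   of the destination is left untouched.  */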
2287 
2288 static void
2289 expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2290 {
2291   struct expand_operand ops[3];
2292   tree type, lhs, rhs, maskt, ptr;
2293   rtx mem, reg, mask;
2294   unsigned align;
2295 
2296   maskt = gimple_call_arg (stmt, 2);
2297   rhs = gimple_call_arg (stmt, 3);
2298   type = TREE_TYPE (rhs);
2299   ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
2300   align = tree_to_shwi (gimple_call_arg (stmt, 1));
2301   if (TYPE_ALIGN (type) != align)
2302     type = build_aligned_type (type, align);
2303   lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);
2304 
2305   mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2306   gcc_assert (MEM_P (mem));
2307   mask = expand_normal (maskt);
2308   reg = expand_normal (rhs);
2309   create_fixed_operand (&ops[0], mem);
2310   create_input_operand (&ops[1], reg, TYPE_MODE (type));
2311   create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2312   expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
2313 				      TYPE_MODE (TREE_TYPE (maskt))),
2314 	       3, ops);
2315 }
2316 
2317 static void
2318 expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
2319 {
2320 }
2321 
2322 static void
2323 expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
2324 {
2325   /* When guessing was done, the hints should already have been stripped away.  */
2326   gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
2327 
2328   rtx target;
2329   tree lhs = gimple_call_lhs (stmt);
2330   if (lhs)
2331     target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2332   else
2333     target = const0_rtx;
2334   rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
2335   if (lhs && val != target)
2336     emit_move_insn (target, val);
2337 }
2338 
2339 /* IFN_VA_ARG is supposed to be expanded in pass_stdarg, so this dummy
2340    function should never be called.  */
2341 
2342 static void
2343 expand_VA_ARG (internal_fn, gcall *)
2344 {
2345   gcc_unreachable ();
2346 }
2347 
2348 /* Expand the IFN_UNIQUE function according to its first argument.  */
2349 
2350 static void
2351 expand_UNIQUE (internal_fn, gcall *stmt)
2352 {
2353   rtx pattern = NULL_RTX;
2354   enum ifn_unique_kind kind
2355     = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));
2356 
2357   switch (kind)
2358     {
2359     default:
2360       gcc_unreachable ();
2361 
2362     case IFN_UNIQUE_UNSPEC:
2363       if (targetm.have_unique ())
2364 	pattern = targetm.gen_unique ();
2365       break;
2366 
2367     case IFN_UNIQUE_OACC_FORK:
2368     case IFN_UNIQUE_OACC_JOIN:
2369       if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
2370 	{
2371 	  tree lhs = gimple_call_lhs (stmt);
2372 	  rtx target = const0_rtx;
2373 
2374 	  if (lhs)
2375 	    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2376 
2377 	  rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
2378 	  rtx axis = expand_normal (gimple_call_arg (stmt, 2));
2379 
2380 	  if (kind == IFN_UNIQUE_OACC_FORK)
2381 	    pattern = targetm.gen_oacc_fork (target, data_dep, axis);
2382 	  else
2383 	    pattern = targetm.gen_oacc_join (target, data_dep, axis);
2384 	}
2385       else
2386 	gcc_unreachable ();
2387       break;
2388     }
2389 
2390   if (pattern)
2391     emit_insn (pattern);
2392 }
2393 
2394 /* The size of an OpenACC compute dimension.  */
2395 
2396 static void
2397 expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
2398 {
2399   tree lhs = gimple_call_lhs (stmt);
2400 
2401   if (!lhs)
2402     return;
2403 
2404   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2405   if (targetm.have_oacc_dim_size ())
2406     {
2407       rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2408 			     VOIDmode, EXPAND_NORMAL);
2409       emit_insn (targetm.gen_oacc_dim_size (target, dim));
2410     }
2411   else
2412     emit_move_insn (target, GEN_INT (1));
2413 }
2414 
2415 /* The position of an OpenACC execution engine along one compute axis.  */
2416 
2417 static void
2418 expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
2419 {
2420   tree lhs = gimple_call_lhs (stmt);
2421 
2422   if (!lhs)
2423     return;
2424 
2425   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2426   if (targetm.have_oacc_dim_pos ())
2427     {
2428       rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2429 			     VOIDmode, EXPAND_NORMAL);
2430       emit_insn (targetm.gen_oacc_dim_pos (target, dim));
2431     }
2432   else
2433     emit_move_insn (target, const0_rtx);
2434 }
2435 
2436 /* This is expanded by the oacc_device_lower pass.  */
2437 
2438 static void
2439 expand_GOACC_LOOP (internal_fn, gcall *)
2440 {
2441   gcc_unreachable ();
2442 }
2443 
2444 /* This is expanded by the oacc_device_lower pass.  */
2445 
2446 static void
2447 expand_GOACC_REDUCTION (internal_fn, gcall *)
2448 {
2449   gcc_unreachable ();
2450 }
2451 
2452 /* This is expanded by the oacc_device_lower pass.  */
2453 
2454 static void
2455 expand_GOACC_TILE (internal_fn, gcall *)
2456 {
2457   gcc_unreachable ();
2458 }
2459 
2460 /* Set errno to EDOM.  */
2461 
2462 static void
2463 expand_SET_EDOM (internal_fn, gcall *)
2464 {
2465 #ifdef TARGET_EDOM
2466 #ifdef GEN_ERRNO_RTX
2467   rtx errno_rtx = GEN_ERRNO_RTX;
2468 #else
2469   rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
2470 #endif
2471   emit_move_insn (errno_rtx,
2472 		  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
2473 #else
2474   gcc_unreachable ();
2475 #endif
2476 }
2477 
2478 /* Expand atomic bit test and set.  */
2479 
2480 static void
2481 expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
2482 {
2483   expand_ifn_atomic_bit_test_and (call);
2484 }
2485 
2486 /* Expand atomic bit test and complement.  */
2487 
2488 static void
2489 expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
2490 {
2491   expand_ifn_atomic_bit_test_and (call);
2492 }
2493 
2494 /* Expand atomic bit test and reset.  */
2495 
2496 static void
2497 expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
2498 {
2499   expand_ifn_atomic_bit_test_and (call);
2500 }
2501 
2502 /* Expand atomic compare and exchange.  */
2503 
2504 static void
2505 expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
2506 {
2507   expand_ifn_atomic_compare_exchange (call);
2508 }
2509 
2510 /* Expand LAUNDER to assignment, lhs = arg0.  */
2511 
2512 static void
2513 expand_LAUNDER (internal_fn, gcall *call)
2514 {
2515   tree lhs = gimple_call_lhs (call);
2516 
2517   if (!lhs)
2518     return;
2519 
2520   expand_assignment (lhs, gimple_call_arg (call, 0), false);
2521 }
2522 
2523 /* Expand DIVMOD () using:
2524    a) the optab handler for udivmod/sdivmod, if it is available;
2525    b) otherwise, a call to the target-specific divmod libfunc
2526       via targetm.expand_divmod_libfunc.  */
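
/* For instance (a sketch, not a transcript of the GIMPLE passes): a
   division and a modulo with the same operands,

     q = x / y;  r = x % y;

   may be combined into one t = DIVMOD (x, y) call, with q and r then
   taken from the real and imaginary parts of the complex result.  */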
2527 
2528 static void
2529 expand_DIVMOD (internal_fn, gcall *call_stmt)
2530 {
2531   tree lhs = gimple_call_lhs (call_stmt);
2532   tree arg0 = gimple_call_arg (call_stmt, 0);
2533   tree arg1 = gimple_call_arg (call_stmt, 1);
2534 
2535   gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
2536   tree type = TREE_TYPE (TREE_TYPE (lhs));
2537   machine_mode mode = TYPE_MODE (type);
2538   bool unsignedp = TYPE_UNSIGNED (type);
2539   optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;
2540 
2541   rtx op0 = expand_normal (arg0);
2542   rtx op1 = expand_normal (arg1);
2543   rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2544 
2545   rtx quotient, remainder, libfunc;
2546 
2547   /* Check if optab_handler exists for divmod_optab for given mode.  */
2548   if (optab_handler (tab, mode) != CODE_FOR_nothing)
2549     {
2550       quotient = gen_reg_rtx (mode);
2551       remainder = gen_reg_rtx (mode);
2552       expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
2553     }
2554 
2555   /* Generate call to divmod libfunc if it exists.  */
2556   else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
2557     targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
2558 				   &quotient, &remainder);
2559 
2560   else
2561     gcc_unreachable ();
2562 
2563   /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR.  */
2564   expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
2565 		       make_tree (TREE_TYPE (arg0), quotient),
2566 		       make_tree (TREE_TYPE (arg1), remainder)),
2567 	      target, VOIDmode, EXPAND_NORMAL);
2568 }
2569 
2570 /* Expand a call to FN using the operands in STMT.  FN has a single
2571    output operand and NARGS input operands.  */
2572 
2573 static void
2574 expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
2575 			unsigned int nargs)
2576 {
2577   expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);
2578 
2579   tree_pair types = direct_internal_fn_types (fn, stmt);
2580   insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
2581 
2582   tree lhs = gimple_call_lhs (stmt);
2583   tree lhs_type = TREE_TYPE (lhs);
2584   rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2585   create_output_operand (&ops[0], lhs_rtx, insn_data[icode].operand[0].mode);
2586 
2587   for (unsigned int i = 0; i < nargs; ++i)
2588     {
2589       tree rhs = gimple_call_arg (stmt, i);
2590       tree rhs_type = TREE_TYPE (rhs);
2591       rtx rhs_rtx = expand_normal (rhs);
2592       if (INTEGRAL_TYPE_P (rhs_type))
2593 	create_convert_operand_from (&ops[i + 1], rhs_rtx,
2594 				     TYPE_MODE (rhs_type),
2595 				     TYPE_UNSIGNED (rhs_type));
2596       else
2597 	create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
2598     }
2599 
2600   expand_insn (icode, nargs + 1, ops);
2601   if (!rtx_equal_p (lhs_rtx, ops[0].value))
2602     {
2603       /* If the return value has an integral type, convert the instruction
2604 	 result to that type.  This is useful for things that return an
2605 	 int regardless of the size of the input.  If the instruction result
2606 	 is smaller than required, assume that it is signed.
2607 
2608 	 If the return value has a nonintegral type, its mode must match
2609 	 the instruction result.  */
2610       if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
2611 	{
2612 	  /* If this is a scalar in a register that is stored in a wider
2613 	     mode than the declared mode, compute the result into its
2614 	     declared mode and then convert to the wider mode.  */
2615 	  gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
2616 	  rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
2617 	  convert_move (SUBREG_REG (lhs_rtx), tmp,
2618 			SUBREG_PROMOTED_SIGN (lhs_rtx));
2619 	}
2620       else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
2621 	emit_move_insn (lhs_rtx, ops[0].value);
2622       else
2623 	{
2624 	  gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
2625 	  convert_move (lhs_rtx, ops[0].value, 0);
2626 	}
2627     }
2628 }
2629 
2630 /* Expanders for optabs that can use expand_direct_optab_fn.  */
2631 
2632 #define expand_unary_optab_fn(FN, STMT, OPTAB) \
2633   expand_direct_optab_fn (FN, STMT, OPTAB, 1)
2634 
2635 #define expand_binary_optab_fn(FN, STMT, OPTAB) \
2636   expand_direct_optab_fn (FN, STMT, OPTAB, 2)
2637 
2638 /* RETURN_TYPE and ARGS are a return type and argument list that are
2639    in principle compatible with FN (which satisfies direct_internal_fn_p).
2640    Return the types that should be used to determine whether the
2641    target supports FN.  */
2642 
2643 tree_pair
2644 direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
2645 {
2646   const direct_internal_fn_info &info = direct_internal_fn (fn);
2647   tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
2648   tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
2649   return tree_pair (type0, type1);
2650 }
2651 
2652 /* CALL is a call whose return type and arguments are in principle
2653    compatible with FN (which satisfies direct_internal_fn_p).  Return the
2654    types that should be used to determine whether the target supports FN.  */
2655 
2656 tree_pair
2657 direct_internal_fn_types (internal_fn fn, gcall *call)
2658 {
2659   const direct_internal_fn_info &info = direct_internal_fn (fn);
2660   tree op0 = (info.type0 < 0
2661 	      ? gimple_call_lhs (call)
2662 	      : gimple_call_arg (call, info.type0));
2663   tree op1 = (info.type1 < 0
2664 	      ? gimple_call_lhs (call)
2665 	      : gimple_call_arg (call, info.type1));
2666   return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
2667 }
2668 
2669 /* Return true if OPTAB is supported for TYPES (whose modes should be
2670    the same) when the optimization type is OPT_TYPE.  Used for simple
2671    direct optabs.  */
2672 
2673 static bool
2674 direct_optab_supported_p (direct_optab optab, tree_pair types,
2675 			  optimization_type opt_type)
2676 {
2677   machine_mode mode = TYPE_MODE (types.first);
2678   gcc_checking_assert (mode == TYPE_MODE (types.second));
2679   return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
2680 }
2681 
2682 /* Return true if load/store lanes optab OPTAB is supported for
2683    array type TYPES.first when the optimization type is OPT_TYPE.  */
2684 
2685 static bool
2686 multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
2687 				optimization_type opt_type)
2688 {
2689   gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
2690   machine_mode imode = TYPE_MODE (types.first);
2691   machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
2692   return (convert_optab_handler (optab, imode, vmode, opt_type)
2693 	  != CODE_FOR_nothing);
2694 }
2695 
2696 #define direct_unary_optab_supported_p direct_optab_supported_p
2697 #define direct_binary_optab_supported_p direct_optab_supported_p
2698 #define direct_mask_load_optab_supported_p direct_optab_supported_p
2699 #define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
2700 #define direct_mask_store_optab_supported_p direct_optab_supported_p
2701 #define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
2702 
2703 /* Return true if FN is supported for the types in TYPES when the
2704    optimization type is OPT_TYPE.  The types are those associated with
2705    the "type0" and "type1" fields of FN's direct_internal_fn_info
2706    structure.  */
2707 
2708 bool
2709 direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
2710 				optimization_type opt_type)
2711 {
2712   switch (fn)
2713     {
2714 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
2715     case IFN_##CODE: break;
2716 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
2717     case IFN_##CODE: \
2718       return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
2719 						opt_type);
2720 #include "internal-fn.def"
2721 
2722     case IFN_LAST:
2723       break;
2724     }
2725   gcc_unreachable ();
2726 }
2727 
2728 /* Return true if FN is supported for type TYPE when the optimization
2729    type is OPT_TYPE.  The caller knows that the "type0" and "type1"
2730    fields of FN's direct_internal_fn_info structure are the same.  */
2731 
2732 bool
2733 direct_internal_fn_supported_p (internal_fn fn, tree type,
2734 				optimization_type opt_type)
2735 {
2736   const direct_internal_fn_info &info = direct_internal_fn (fn);
2737   gcc_checking_assert (info.type0 == info.type1);
2738   return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
2739 }
2740 
2741 /* Return true if IFN_SET_EDOM is supported.  */
2742 
2743 bool
2744 set_edom_supported_p (void)
2745 {
2746 #ifdef TARGET_EDOM
2747   return true;
2748 #else
2749   return false;
2750 #endif
2751 }
2752 
2753 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
2754   static void						\
2755   expand_##CODE (internal_fn fn, gcall *stmt)		\
2756   {							\
2757     expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab);	\
2758   }
2759 #include "internal-fn.def"
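
/* For illustration (a hypothetical entry, not quoted from
   internal-fn.def): a definition such as

     DEF_INTERNAL_OPTAB_FN (FOO, ECF_CONST, foo, binary)

   is stamped out here as

     static void
     expand_FOO (internal_fn fn, gcall *stmt)
     {
       expand_binary_optab_fn (fn, stmt, foo_optab);
     }

   which forwards to expand_direct_optab_fn with nargs == 2.  */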
2760 
2761 /* Routines to expand each internal function, indexed by function number.
2762    Each routine has the prototype:
2763 
2764        expand_<NAME> (internal_fn fn, gcall *stmt)
2765 
2766    where STMT is the statement that performs the call.  */
2767 static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
2768 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
2769 #include "internal-fn.def"
2770   0
2771 };
2772 
2773 /* Return true if IFN is some form of load from memory.  */
2774 
2775 bool
2776 internal_load_fn_p (internal_fn fn)
2777 {
2778   switch (fn)
2779     {
2780     case IFN_MASK_LOAD:
2781     case IFN_LOAD_LANES:
2782       return true;
2783 
2784     default:
2785       return false;
2786     }
2787 }
2788 
2789 /* Return true if IFN is some form of store to memory.  */
2790 
2791 bool
2792 internal_store_fn_p (internal_fn fn)
2793 {
2794   switch (fn)
2795     {
2796     case IFN_MASK_STORE:
2797     case IFN_STORE_LANES:
2798       return true;
2799 
2800     default:
2801       return false;
2802     }
2803 }
2804 
2805 /* If FN takes a vector mask argument, return the index of that argument,
2806    otherwise return -1.  */
2807 
2808 int
2809 internal_fn_mask_index (internal_fn fn)
2810 {
2811   switch (fn)
2812     {
2813     case IFN_MASK_LOAD:
2814     case IFN_MASK_STORE:
2815       return 2;
2816 
2817     default:
2818       return -1;
2819     }
2820 }
2821 
2822 
2823 /* Expand STMT as though it were a call to internal function FN.  */
2824 
2825 void
2826 expand_internal_call (internal_fn fn, gcall *stmt)
2827 {
2828   internal_fn_expanders[fn] (fn, stmt);
2829 }
2830 
2831 /* Expand STMT, which is a call to internal function FN.  */
2832 
2833 void
2834 expand_internal_call (gcall *stmt)
2835 {
2836   expand_internal_call (gimple_call_internal_fn (stmt), stmt);
2837 }
2838 
2839 void
2840 expand_PHI (internal_fn, gcall *)
2841 {
2842   gcc_unreachable ();
2843 }
2844