1 /* Subroutines for the C front end on the PowerPC architecture.
2    Copyright (C) 2002-2022 Free Software Foundation, Inc.
3 
4    Contributed by Zack Weinberg <zack@codesourcery.com>
5    and Paolo Bonzini <bonzini@gnu.org>
6 
7    This file is part of GCC.
8 
9    GCC is free software; you can redistribute it and/or modify it
10    under the terms of the GNU General Public License as published
11    by the Free Software Foundation; either version 3, or (at your
12    option) any later version.
13 
14    GCC is distributed in the hope that it will be useful, but WITHOUT
15    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
17    License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with GCC; see the file COPYING3.  If not see
21    <http://www.gnu.org/licenses/>.  */
22 
23 #define IN_TARGET_CODE 1
24 
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "target.h"
29 #include "c-family/c-common.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "stringpool.h"
33 #include "stor-layout.h"
34 #include "c-family/c-pragma.h"
35 #include "langhooks.h"
36 #include "c/c-tree.h"
37 
38 #include "rs6000-internal.h"
39 
40 /* Handle the machine specific pragma longcall.  Its syntax is
41 
42    # pragma longcall ( TOGGLE )
43 
44    where TOGGLE is either 0 or 1.
45 
46    rs6000_default_long_calls is set to the value of TOGGLE, changing
47    whether or not new function declarations receive a longcall
48    attribute by default.  */
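   Illustrative example (not part of this file): after

     #pragma longcall (1)
     extern int foo (void);

   calls to foo are emitted as indirect calls through a register, as if
   foo had been declared with __attribute__((longcall)), until a later
   "#pragma longcall (0)" restores direct calls.  */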
49 
50 void
51 rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
52 {
53 #define SYNTAX_ERROR(gmsgid) do {					\
54   warning (OPT_Wpragmas, gmsgid);					\
55   warning (OPT_Wpragmas, "ignoring malformed %<#pragma longcall%>");	\
56   return;								\
57 } while (0)
58 
59 
60 
61   tree x, n;
62 
63   /* If we get here, generic code has already scanned the directive
64      leader and the word "longcall".  */
65 
66   if (pragma_lex (&x) != CPP_OPEN_PAREN)
67     SYNTAX_ERROR ("missing open paren");
68   if (pragma_lex (&n) != CPP_NUMBER)
69     SYNTAX_ERROR ("missing number");
70   if (pragma_lex (&x) != CPP_CLOSE_PAREN)
71     SYNTAX_ERROR ("missing close paren");
72 
73   if (n != integer_zero_node && n != integer_one_node)
74     SYNTAX_ERROR ("number must be 0 or 1");
75 
76   if (pragma_lex (&x) != CPP_EOF)
77     warning (OPT_Wpragmas, "junk at end of %<#pragma longcall%>");
78 
79   rs6000_default_long_calls = (n == integer_one_node);
80 }
81 
82 /* Handle defining many CPP flags based on TARGET_xxx.  As a general
83    policy, rather than trying to guess what flags a user might want a
84    #define for, it's better to define a flag for everything.  */
85 
86 #define builtin_define(TXT) cpp_define (pfile, TXT)
87 #define builtin_assert(TXT) cpp_assert (pfile, TXT)
88 
89 /* Keep the AltiVec keywords handy for fast comparisons.  */
90 static GTY(()) tree __vector_keyword;
91 static GTY(()) tree vector_keyword;
92 static GTY(()) tree __pixel_keyword;
93 static GTY(()) tree pixel_keyword;
94 static GTY(()) tree __bool_keyword;
95 static GTY(()) tree bool_keyword;
96 static GTY(()) tree _Bool_keyword;
97 static GTY(()) tree __int128_type;
98 static GTY(()) tree __uint128_type;
99 
100 /* Preserved across calls.  */
101 static tree expand_bool_pixel;
102 
103 static cpp_hashnode *
104 altivec_categorize_keyword (const cpp_token *tok)
105 {
106   if (tok->type == CPP_NAME)
107     {
108       cpp_hashnode *ident = tok->val.node.node;
109 
110       if (ident == C_CPP_HASHNODE (vector_keyword))
111 	return C_CPP_HASHNODE (__vector_keyword);
112 
113       if (ident == C_CPP_HASHNODE (pixel_keyword))
114 	return C_CPP_HASHNODE (__pixel_keyword);
115 
116       if (ident == C_CPP_HASHNODE (bool_keyword))
117 	return C_CPP_HASHNODE (__bool_keyword);
118 
119       if (ident == C_CPP_HASHNODE (_Bool_keyword))
120 	return C_CPP_HASHNODE (__bool_keyword);
121 
122       return ident;
123     }
124 
125   return 0;
126 }
127 
128 static void
129 init_vector_keywords (void)
130 {
131   /* Keywords without two leading underscores are context-sensitive, and hence
132      implemented as conditional macros, controlled by the
133      rs6000_macro_to_expand() function below.  If we have ISA 2.07 64-bit
134      support, record the __int128_t and __uint128_t types.  */
135 
136   __vector_keyword = get_identifier ("__vector");
137   C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;
138 
139   __pixel_keyword = get_identifier ("__pixel");
140   C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL;
141 
142   __bool_keyword = get_identifier ("__bool");
143   C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL;
144 
145   vector_keyword = get_identifier ("vector");
146   C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;
147 
148   pixel_keyword = get_identifier ("pixel");
149   C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL;
150 
151   bool_keyword = get_identifier ("bool");
152   C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL;
153 
154   _Bool_keyword = get_identifier ("_Bool");
155   C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL;
156 
157   if (TARGET_VADDUQM)
158     {
159       __int128_type = get_identifier ("__int128_t");
160       __uint128_type = get_identifier ("__uint128_t");
161     }
162 }
163 
164 /* Helper function to find out which RID_INT_N_* code is the one for
165    __int128, if any.  Returns RID_MAX+1 if none apply, which is safe
166    (for our purposes, since we always expect to have __int128) to
167    compare against.  */
168 static int
169 rid_int128(void)
170 {
171   int i;
172 
173   for (i = 0; i < NUM_INT_N_ENTS; i ++)
174     if (int_n_enabled_p[i]
175 	&& int_n_data[i].bitsize == 128)
176       return RID_INT_N_0 + i;
177 
178   return RID_MAX + 1;
179 }
180 
181 /* Called to decide whether a conditional macro should be expanded.
182    Since we have exactly one such macro (i.e., 'vector'), we do not
183    need to examine the 'tok' parameter.  */
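/* Illustrative example of the context-sensitive expansion performed below:

     vector unsigned int v;
     std::vector<int> w;

   The first "vector" is followed by a type keyword, so the conditional
   macro expands it to __vector; the second is not, so it is left alone.
   A following "bool" or "pixel" (e.g. "vector bool char") is recorded in
   expand_bool_pixel so that it is expanded on a subsequent call.  */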
184 
185 static cpp_hashnode *
186 rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
187 {
188   cpp_hashnode *expand_this = tok->val.node.node;
189   cpp_hashnode *ident;
190 
191   /* If the current machine does not have altivec, don't look for the
192      keywords.  */
193   if (!TARGET_ALTIVEC)
194     return NULL;
195 
196   ident = altivec_categorize_keyword (tok);
197 
198   if (ident != expand_this)
199     expand_this = NULL;
200 
201   if (ident == C_CPP_HASHNODE (__vector_keyword))
202     {
203       int idx = 0;
204       do
205 	tok = cpp_peek_token (pfile, idx++);
206       while (tok->type == CPP_PADDING);
207       ident = altivec_categorize_keyword (tok);
208 
209       if (ident == C_CPP_HASHNODE (__pixel_keyword))
210 	{
211 	  expand_this = C_CPP_HASHNODE (__vector_keyword);
212 	  expand_bool_pixel = __pixel_keyword;
213 	}
214       else if (ident == C_CPP_HASHNODE (__bool_keyword))
215 	{
216 	  expand_this = C_CPP_HASHNODE (__vector_keyword);
217 	  expand_bool_pixel = __bool_keyword;
218 	}
219       /* The boost libraries have code with Iterator::vector vector in it.  If
220 	 we allow the normal handling, this module will be called recursively,
221 	 and the vector will be skipped.  */
222       else if (ident && (ident != C_CPP_HASHNODE (__vector_keyword)))
223 	{
224 	  enum rid rid_code = (enum rid)(ident->rid_code);
225 	  bool is_macro = cpp_macro_p (ident);
226 
227 	  /* If there is a function-like macro, check if it is going to be
228 	     invoked with or without arguments.  Without following ( treat
229 	     it like non-macro, otherwise the following cpp_get_token eats
230 	     what should be preserved.  */
231 	  if (is_macro && cpp_fun_like_macro_p (ident))
232 	    {
233 	      int idx2 = idx;
234 	      do
235 		tok = cpp_peek_token (pfile, idx2++);
236 	      while (tok->type == CPP_PADDING);
237 	      if (tok->type != CPP_OPEN_PAREN)
238 		is_macro = false;
239 	    }
240 
241 	  if (is_macro)
242 	    {
243 	      do
244 		(void) cpp_get_token (pfile);
245 	      while (--idx > 0);
246 	      do
247 		tok = cpp_peek_token (pfile, idx++);
248 	      while (tok->type == CPP_PADDING);
249 	      ident = altivec_categorize_keyword (tok);
250 	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
251 		{
252 		  expand_this = C_CPP_HASHNODE (__vector_keyword);
253 		  expand_bool_pixel = __pixel_keyword;
254 		  rid_code = RID_MAX;
255 		}
256 	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
257 		{
258 		  expand_this = C_CPP_HASHNODE (__vector_keyword);
259 		  expand_bool_pixel = __bool_keyword;
260 		  rid_code = RID_MAX;
261 		}
262 	      else if (ident)
263 		rid_code = (enum rid)(ident->rid_code);
264 	    }
265 
266 	  if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
267 	      || rid_code == RID_SHORT || rid_code == RID_SIGNED
268 	      || rid_code == RID_INT || rid_code == RID_CHAR
269 	      || rid_code == RID_FLOAT
270 	      || (rid_code == RID_DOUBLE && TARGET_VSX)
271 	      || (rid_code == rid_int128 () && TARGET_VADDUQM))
272 	    {
273 	      expand_this = C_CPP_HASHNODE (__vector_keyword);
274 	      /* If the next keyword is bool or pixel, it
275 		 will need to be expanded as well.  */
276 	      do
277 		tok = cpp_peek_token (pfile, idx++);
278 	      while (tok->type == CPP_PADDING);
279 	      ident = altivec_categorize_keyword (tok);
280 
281 	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
282 		expand_bool_pixel = __pixel_keyword;
283 	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
284 		expand_bool_pixel = __bool_keyword;
285 	      else
286 		{
287 		  /* Try two tokens down, too.  */
288 		  do
289 		    tok = cpp_peek_token (pfile, idx++);
290 		  while (tok->type == CPP_PADDING);
291 		  ident = altivec_categorize_keyword (tok);
292 		  if (ident == C_CPP_HASHNODE (__pixel_keyword))
293 		    expand_bool_pixel = __pixel_keyword;
294 		  else if (ident == C_CPP_HASHNODE (__bool_keyword))
295 		    expand_bool_pixel = __bool_keyword;
296 		}
297 	    }
298 
299 	  /* Support vector __int128_t, but we don't need to worry about bool
300 	     or pixel on this type.  */
301 	  else if (TARGET_VADDUQM
302 		   && (ident == C_CPP_HASHNODE (__int128_type)
303 		       || ident == C_CPP_HASHNODE (__uint128_type)))
304 	    expand_this = C_CPP_HASHNODE (__vector_keyword);
305 	}
306     }
307   else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
308     {
309       expand_this = C_CPP_HASHNODE (__pixel_keyword);
310       expand_bool_pixel = 0;
311     }
312   else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
313     {
314       expand_this = C_CPP_HASHNODE (__bool_keyword);
315       expand_bool_pixel = 0;
316     }
317 
318   return expand_this;
319 }
320 
321 
322 /* Define or undefine a single macro.  */
323 
324 static void
325 rs6000_define_or_undefine_macro (bool define_p, const char *name)
326 {
327   if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
328     fprintf (stderr, "#%s %s\n", (define_p) ? "define" : "undef", name);
329 
330   if (define_p)
331     cpp_define (parse_in, name);
332   else
333     cpp_undef (parse_in, name);
334 }
335 
336 /* Define or undefine macros based on the current target.  If the user does
337    #pragma GCC target, we need to adjust the macros dynamically.  Note, some of
338    the options needed for builtins have been moved to separate variables, so
339    have both the target flags and the builtin flags as arguments.  */
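/* For example (illustrative), a region guarded by
   "#pragma GCC target ("cpu=power9")" gets macros such as _ARCH_PWR9 and
   __POWER9_VECTOR__ defined on entry (define_p == true) and undefined
   again when the previous target is restored (define_p == false).  */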
340 
341 void
342 rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
343 			     HOST_WIDE_INT bu_mask)
344 {
345   if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
346     fprintf (stderr,
347 	     "rs6000_target_modify_macros (%s, " HOST_WIDE_INT_PRINT_HEX
348 	     ", " HOST_WIDE_INT_PRINT_HEX ")\n",
349 	     (define_p) ? "define" : "undef",
350 	     flags, bu_mask);
351 
352   /* Each of the flags mentioned below controls whether certain
353      preprocessor macros will be automatically defined when
354      preprocessing source files for compilation by this compiler.
355      While most of these flags can be enabled or disabled
356      explicitly by specifying certain command-line options when
357      invoking the compiler, there are also many ways in which these
358      flags are enabled or disabled implicitly, based on compiler
359      defaults, configuration choices, and on the presence of certain
360      related command-line options.  Many, but not all, of these
361      implicit behaviors can be found in file "rs6000.cc", the
362      rs6000_option_override_internal() function.
363 
364      In general, each of the flags may be automatically enabled in
365      any of the following conditions:
366 
367      1. If no -mcpu target is specified on the command line and no
368 	--with-cpu target is specified to the configure command line
369 	and the TARGET_DEFAULT macro for this default cpu host
370 	includes the flag, and the flag has not been explicitly disabled
371 	by command-line options.
372 
373      2. If the target specified with -mcpu=target on the command line, or
374 	in the absence of a -mcpu=target command-line option, if the
375 	target specified using --with-cpu=target on the configure
376 	command line, is disqualified because the associated binary
377 	tools (e.g. the assembler) lack support for the requested cpu,
378 	and the TARGET_DEFAULT macro for this default cpu host
379 	includes the flag, and the flag has not been explicitly disabled
380 	by command-line options.
381 
382      3. If either of the above two conditions apply except that the
383 	TARGET_DEFAULT macro is defined to equal zero, and
384 	TARGET_POWERPC64 and
385 	a) BYTES_BIG_ENDIAN and the flag to be enabled is either
386 	   MASK_PPC_GFXOPT or MASK_POWERPC64 (flags for "powerpc64"
387 	   target), or
388 	b) !BYTES_BIG_ENDIAN and the flag to be enabled is either
389 	   MASK_POWERPC64 or it is one of the flags included in
390 	   ISA_2_7_MASKS_SERVER (flags for "powerpc64le" target).
391 
392      4. If a cpu has been requested with a -mcpu=target command-line option
393 	and this cpu has not been disqualified due to shortcomings of the
394 	binary tools, and the set of flags associated with the requested cpu
395 	include the flag to be enabled.  See rs6000-cpus.def for macro
396 	definitions that represent various ABI standards
397 	(e.g. ISA_2_1_MASKS, ISA_3_0_MASKS_SERVER) and for a list of
398 	the specific flags that are associated with each of the cpu
399 	choices that can be specified as the target of a -mcpu=target
400 	compile option, or as the target of a --with-cpu=target
401 	configure option.  Target flags that are specified in either
402 	of these two ways are considered "implicit" since the flags
403 	are not mentioned specifically by name.
404 
405 	Additional documentation describing behavior specific to
406 	particular flags is provided below, immediately preceding the
407 	use of each relevant flag.
408 
409      5. If there is no -mcpu=target command-line option, and the cpu
410 	requested by a --with-cpu=target command-line option has not
411 	been disqualified due to shortcomings of the binary tools, and
412 	the set of flags associated with the specified target include
413 	the flag to be enabled.  See the notes immediately above for a
414 	summary of the flags associated with particular cpu
415 	definitions.  */
416 
417   /* rs6000_isa_flags based options.  */
418   rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC");
419   if ((flags & OPTION_MASK_PPC_GPOPT) != 0)
420     rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCSQ");
421   if ((flags & OPTION_MASK_PPC_GFXOPT) != 0)
422     rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCGR");
423   if ((flags & OPTION_MASK_POWERPC64) != 0)
424     rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC64");
425   if ((flags & OPTION_MASK_MFCRF) != 0)
426     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR4");
427   if ((flags & OPTION_MASK_POPCNTB) != 0)
428     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5");
429   if ((flags & OPTION_MASK_FPRND) != 0)
430     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5X");
431   if ((flags & OPTION_MASK_CMPB) != 0)
432     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR6");
433   if ((flags & OPTION_MASK_POPCNTD) != 0)
434     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR7");
435   if ((flags & OPTION_MASK_POWER8) != 0)
436     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR8");
437   if ((flags & OPTION_MASK_MODULO) != 0)
438     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR9");
439   if ((flags & OPTION_MASK_POWER10) != 0)
440     rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR10");
441   if ((flags & OPTION_MASK_SOFT_FLOAT) != 0)
442     rs6000_define_or_undefine_macro (define_p, "_SOFT_FLOAT");
443   if ((flags & OPTION_MASK_RECIP_PRECISION) != 0)
444     rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
445   /* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
446      in any of the following conditions:
447      1. The operating system is Darwin and it is configured for 64
448 	bit.  (See darwin_rs6000_override_options.)
449      2. The operating system is Darwin and the operating system
450 	version is 10.5 or higher and the user has not explicitly
451 	disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
452 	the compiler is not producing code for integration within the
453 	kernel.  (See darwin_rs6000_override_options.)
454      Note that the OPTION_MASK_ALTIVEC flag is automatically turned
455      off in any of the following conditions:
456      1. The operating system does not support saving of AltiVec
457 	registers (OS_MISSING_ALTIVEC).
458      2. If an inner context (as introduced by
459 	__attribute__((__target__())) or #pragma GCC target()
460 	requests a target that normally enables the
461 	OPTION_MASK_ALTIVEC flag but the outer-most "main target"
462 	does not support the rs6000_altivec_abi, this flag is
463 	turned off for the inner context unless OPTION_MASK_ALTIVEC
464 	was explicitly enabled for the inner context.  */
465   if ((flags & OPTION_MASK_ALTIVEC) != 0)
466     {
467       const char *vec_str = (define_p) ? "__VEC__=10206" : "__VEC__";
468       rs6000_define_or_undefine_macro (define_p, "__ALTIVEC__");
469       rs6000_define_or_undefine_macro (define_p, vec_str);
470 
471       /* Define this when supporting context-sensitive keywords.  */
472       if (!flag_iso)
473 	rs6000_define_or_undefine_macro (define_p, "__APPLE_ALTIVEC__");
474       if (rs6000_aix_extabi)
475 	rs6000_define_or_undefine_macro (define_p, "__EXTABI__");
476     }
477   /* Note that the OPTION_MASK_VSX flag is automatically turned on in
478      the following conditions:
479      1. TARGET_P8_VECTOR is explicitly turned on and the OPTION_MASK_VSX
480         was not explicitly turned off.  Hereafter, the OPTION_MASK_VSX
481         flag is considered to have been explicitly turned on.
482      Note that the OPTION_MASK_VSX flag is automatically turned off in
483      the following conditions:
484      1. The operating system does not support saving of AltiVec
485 	registers (OS_MISSING_ALTIVEC).
486      2. If the option TARGET_HARD_FLOAT is turned off.  Hereafter, the
487 	OPTION_MASK_VSX flag is considered to have been turned off
488 	explicitly.
489      3. If TARGET_AVOID_XFORM is turned on explicitly at the outermost
490 	compilation context, or if it is turned on by any means in an
491 	inner compilation context.  Hereafter, the OPTION_MASK_VSX
492 	flag is considered to have been turned off explicitly.
493      4. If TARGET_ALTIVEC was explicitly disabled.  Hereafter, the
494 	OPTION_MASK_VSX flag is considered to have been turned off
495 	explicitly.
496      5. If an inner context (as introduced by
497 	__attribute__((__target__())) or #pragma GCC target()
498 	requests a target that normally enables the
499 	OPTION_MASK_VSX flag but the outer-most "main target"
500 	does not support the rs6000_altivec_abi, this flag is
501 	turned off for the inner context unless OPTION_MASK_VSX
502 	was explicitly enabled for the inner context.  */
503   if ((flags & OPTION_MASK_VSX) != 0)
504     rs6000_define_or_undefine_macro (define_p, "__VSX__");
505   if ((flags & OPTION_MASK_HTM) != 0)
506     {
507       rs6000_define_or_undefine_macro (define_p, "__HTM__");
508       /* Tell the user that our HTM insn patterns act as memory barriers.  */
509       rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
510     }
511   /* Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
512      on in the following conditions:
513      1. TARGET_P9_VECTOR is explicitly turned on and
514         OPTION_MASK_P8_VECTOR is not explicitly turned off.
515         Hereafter, the OPTION_MASK_P8_VECTOR flag is considered to
516         have been turned off explicitly.
517      Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
518      off in the following conditions:
519      1. If any of TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX
520 	were turned off explicitly and OPTION_MASK_P8_VECTOR flag was
521 	not turned on explicitly.
522      2. If TARGET_ALTIVEC is turned off.  Hereafter, the
523 	OPTION_MASK_P8_VECTOR flag is considered to have been turned off
524 	explicitly.
525      3. If TARGET_VSX is turned off and OPTION_MASK_P8_VECTOR was not
526         explicitly enabled.  If TARGET_VSX is explicitly enabled, the
527         OPTION_MASK_P8_VECTOR flag is hereafter also considered to
528 	have been turned off explicitly.  */
529   if ((flags & OPTION_MASK_P8_VECTOR) != 0)
530     rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
531   /* Note that the OPTION_MASK_P9_VECTOR flag is automatically turned
532      off in the following conditions:
533      1. If TARGET_P8_VECTOR is turned off and OPTION_MASK_P9_VECTOR is
534         not turned on explicitly. Hereafter, if OPTION_MASK_P8_VECTOR
535         was turned on explicitly, the OPTION_MASK_P9_VECTOR flag is
536         also considered to have been turned off explicitly.
537      Note that the OPTION_MASK_P9_VECTOR is automatically turned on
538      in the following conditions:
539      1. If TARGET_P9_MINMAX was turned on explicitly.
540         Hereafter, THE OPTION_MASK_P9_VECTOR flag is considered to
541         have been turned on explicitly.  */
542   if ((flags & OPTION_MASK_P9_VECTOR) != 0)
543     rs6000_define_or_undefine_macro (define_p, "__POWER9_VECTOR__");
544   /* Note that the OPTION_MASK_QUAD_MEMORY flag is automatically
545      turned off in the following conditions:
546      1. If TARGET_POWERPC64 is turned off.
547      2. If WORDS_BIG_ENDIAN is false (non-atomic quad memory
548 	load/store are disabled on little endian).  */
549   if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
550     rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY__");
551   /* Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is automatically
552      turned off in the following conditions:
553      1. If TARGET_POWERPC64 is turned off.
554      Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is
555      automatically turned on in the following conditions:
556      1. If TARGET_QUAD_MEMORY and this flag was not explicitly
557 	disabled.  */
558   if ((flags & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
559     rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY_ATOMIC__");
560   /* Note that the OPTION_MASK_CRYPTO flag is automatically turned off
561      in the following conditions:
562      1. If any of TARGET_HARD_FLOAT or TARGET_ALTIVEC or TARGET_VSX
563 	are turned off explicitly and OPTION_MASK_CRYPTO is not turned
564 	on explicitly.
565      2. If TARGET_ALTIVEC is turned off.  */
566   if ((flags & OPTION_MASK_CRYPTO) != 0)
567     rs6000_define_or_undefine_macro (define_p, "__CRYPTO__");
568   if ((flags & OPTION_MASK_FLOAT128_KEYWORD) != 0)
569     {
570       rs6000_define_or_undefine_macro (define_p, "__FLOAT128__");
571       if (define_p)
572 	rs6000_define_or_undefine_macro (true, "__float128=__ieee128");
573       else
574 	rs6000_define_or_undefine_macro (false, "__float128");
575       if (ieee128_float_type_node && define_p)
576 	rs6000_define_or_undefine_macro (true, "__SIZEOF_FLOAT128__=16");
577       else
578 	rs6000_define_or_undefine_macro (false, "__SIZEOF_FLOAT128__");
579     }
580   /* OPTION_MASK_FLOAT128_HARDWARE can be turned on if -mcpu=power9 is used or
581      via the target attribute/pragma.  */
582   if ((flags & OPTION_MASK_FLOAT128_HW) != 0)
583     rs6000_define_or_undefine_macro (define_p, "__FLOAT128_HARDWARE__");
584 
585   /* options from the builtin masks.  */
586   /* Note that RS6000_BTM_CELL is enabled only if (rs6000_cpu ==
587      PROCESSOR_CELL) (e.g. -mcpu=cell).  */
588   if ((bu_mask & RS6000_BTM_CELL) != 0)
589     rs6000_define_or_undefine_macro (define_p, "__PPU__");
590 
591   /* Tell the user if we support the MMA instructions.  */
592   if ((flags & OPTION_MASK_MMA) != 0)
593     rs6000_define_or_undefine_macro (define_p, "__MMA__");
594   /* Whether pc-relative code is being generated.  */
595   if ((flags & OPTION_MASK_PCREL) != 0)
596     rs6000_define_or_undefine_macro (define_p, "__PCREL__");
597   /* Tell the user -mrop-protect is in play.  */
598   if (rs6000_rop_protect)
599     rs6000_define_or_undefine_macro (define_p, "__ROP_PROTECT__");
600 }
601 
602 void
603 rs6000_cpu_cpp_builtins (cpp_reader *pfile)
604 {
605   /* Define all of the common macros.  */
606   rs6000_target_modify_macros (true, rs6000_isa_flags,
607 			       rs6000_builtin_mask_calculate ());
608 
609   if (TARGET_FRE)
610     builtin_define ("__RECIP__");
611   if (TARGET_FRES)
612     builtin_define ("__RECIPF__");
613   if (TARGET_FRSQRTE)
614     builtin_define ("__RSQRTE__");
615   if (TARGET_FRSQRTES)
616     builtin_define ("__RSQRTEF__");
617   if (TARGET_FLOAT128_TYPE)
618     builtin_define ("__FLOAT128_TYPE__");
619   if (ibm128_float_type_node)
620     builtin_define ("__SIZEOF_IBM128__=16");
621   if (ieee128_float_type_node)
622     builtin_define ("__SIZEOF_IEEE128__=16");
623 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
624   builtin_define ("__BUILTIN_CPU_SUPPORTS__");
625 #endif
626 
627   if (TARGET_EXTRA_BUILTINS && cpp_get_options (pfile)->lang != CLK_ASM)
628     {
629       /* Define the AltiVec syntactic elements.  */
630       builtin_define ("__vector=__attribute__((altivec(vector__)))");
631       builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
632       builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
633 
634       if (!flag_iso)
635 	{
636 	  builtin_define ("vector=vector");
637 	  builtin_define ("pixel=pixel");
638 	  builtin_define ("bool=bool");
639 	  builtin_define ("_Bool=_Bool");
640 	  init_vector_keywords ();
641 
642 	  /* Enable context-sensitive macros.  */
643 	  cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
644 	}
645     }
646   if (!TARGET_HARD_FLOAT)
647     builtin_define ("_SOFT_DOUBLE");
648   /* Used by lwarx/stwcx. errata work-around.  */
649   if (rs6000_cpu == PROCESSOR_PPC405)
650     builtin_define ("__PPC405__");
651   /* Used by libstdc++.  */
652   if (TARGET_NO_LWSYNC)
653     builtin_define ("__NO_LWSYNC__");
654 
655   if (TARGET_EXTRA_BUILTINS)
656     {
657       /* For the VSX builtin functions identical to Altivec functions, just map
658 	 the altivec builtin into the vsx version (the altivec functions
659 	 generate VSX code if -mvsx).  */
660       builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
661       builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
662       builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
663       builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
664       builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
665       builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
666       builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");
667 
668       /* Also map the a and m versions of the multiply/add instructions to the
669 	 builtin for people blindly going off the instruction manual.  */
670       builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
671       builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
672       builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
673       builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
674       builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
675       builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
676       builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
677       builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
678       builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
679       builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
680       builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
681       builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
682       builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
683       builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
684       builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
685       builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
686     }
687 
688   /* Map the old _Float128 'q' builtins into the new 'f128' builtins.  */
689   if (TARGET_FLOAT128_TYPE)
690     {
691       builtin_define ("__builtin_fabsq=__builtin_fabsf128");
692       builtin_define ("__builtin_copysignq=__builtin_copysignf128");
693       builtin_define ("__builtin_nanq=__builtin_nanf128");
694       builtin_define ("__builtin_nansq=__builtin_nansf128");
695       builtin_define ("__builtin_infq=__builtin_inff128");
696       builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
697     }
698 
699   /* Tell users they can use __builtin_bswap{16,64}.  */
700   builtin_define ("__HAVE_BSWAP__");
701 
702   /* May be overridden by target configuration.  */
703   RS6000_CPU_CPP_ENDIAN_BUILTINS();
704 
705   if (TARGET_LONG_DOUBLE_128)
706     {
707       builtin_define ("__LONG_DOUBLE_128__");
708       builtin_define ("__LONGDOUBLE128");
709 
710       if (TARGET_IEEEQUAD)
711 	{
712 	  /* Older versions of GLIBC used __attribute__((__KC__)) to create the
713 	     IEEE 128-bit floating point complex type for C++ (which does not
714 	     support _Float128 _Complex).  If the default for long double is
715 	     IEEE 128-bit mode, the library would need to use
716 	     __attribute__((__TC__)) instead.  Defining __KF__ and __KC__
717 	     is a stop-gap to build with the older libraries, until we
718 	     get an updated library.  */
719 	  builtin_define ("__LONG_DOUBLE_IEEE128__");
720 	  builtin_define ("__KF__=__TF__");
721 	  builtin_define ("__KC__=__TC__");
722 	}
723       else
724 	builtin_define ("__LONG_DOUBLE_IBM128__");
725     }
726 
727   switch (TARGET_CMODEL)
728     {
729       /* Deliberately omit __CMODEL_SMALL__ since that was the default
730 	 before -mcmodel support was added.  */
731     case CMODEL_MEDIUM:
732       builtin_define ("__CMODEL_MEDIUM__");
733       break;
734     case CMODEL_LARGE:
735       builtin_define ("__CMODEL_LARGE__");
736       break;
737     default:
738       break;
739     }
740 
741   switch (rs6000_current_abi)
742     {
743     case ABI_V4:
744       builtin_define ("_CALL_SYSV");
745       break;
746     case ABI_AIX:
747       builtin_define ("_CALL_AIXDESC");
748       builtin_define ("_CALL_AIX");
749       builtin_define ("_CALL_ELF=1");
750       break;
751     case ABI_ELFv2:
752       builtin_define ("_CALL_ELF=2");
753       break;
754     case ABI_DARWIN:
755       builtin_define ("_CALL_DARWIN");
756       break;
757     default:
758       break;
759     }
760 
761   /* Vector element order.  */
762   if (BYTES_BIG_ENDIAN)
763     builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
764   else
765     builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");
766 
767   /* Let the compiled code know if 'f' class registers will not be available.  */
768   if (TARGET_SOFT_FLOAT)
769     builtin_define ("__NO_FPRS__");
770 
771   /* Whether aggregates passed by value are aligned to a 16 byte boundary
772      if their alignment is 16 bytes or larger.  */
773   if ((TARGET_MACHO && rs6000_darwin64_abi)
774       || DEFAULT_ABI == ABI_ELFv2
775       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
776     builtin_define ("__STRUCT_PARM_ALIGN__=16");
777 }
778 
779 
780 
781 /* Convert a type stored into a struct altivec_builtin_types as ID,
782    into a tree.  The types are in rs6000_builtin_types: negative values
783    create a pointer type for the type associated to ~ID.  Note it is
784    a logical NOT, rather than a negation, otherwise you cannot represent
785    a pointer type for ID 0.  */
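/* For example (assuming the usual rs6000_builtin_type_index values),
   rs6000_builtin_type (RS6000_BTI_V4SI) returns the "vector signed int"
   type, while rs6000_builtin_type (~RS6000_BTI_V4SI) returns a pointer
   to that type.  */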
786 
787 static inline tree
788 rs6000_builtin_type (int id)
789 {
790   tree t;
791   t = rs6000_builtin_types[id < 0 ? ~id : id];
792   return id < 0 ? build_pointer_type (t) : t;
793 }
794 
795 /* Check whether the type of an argument, T, is compatible with a type ID
796    stored into a struct altivec_builtin_types.  Integer types are considered
797    compatible; otherwise, the language hook lang_hooks.types_compatible_p makes
798    the decision.  Also allow long double and _Float128 to be compatible if
799    -mabi=ieeelongdouble.  */
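/* For example (illustrative), with -mabi=ieeelongdouble and a 128-bit
   long double, an argument of type "long double" is accepted where a
   builtin parameter has type _Float128, since both satisfy
   is_float128_p below.  */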
800 
801 static inline bool
802 is_float128_p (tree t)
803 {
804   return (t == float128_type_node
805 	  || (TARGET_IEEEQUAD
806 	      && TARGET_LONG_DOUBLE_128
807 	      && t == long_double_type_node));
808 }
809 
810 
811 /* Return true iff ARGTYPE can be compatibly passed as PARMTYPE.  */
812 static bool
813 rs6000_builtin_type_compatible (tree parmtype, tree argtype)
814 {
815   if (parmtype == error_mark_node)
816     return false;
817 
818   if (INTEGRAL_TYPE_P (parmtype) && INTEGRAL_TYPE_P (argtype))
819     return true;
820 
821   if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
822       && is_float128_p (parmtype) && is_float128_p (argtype))
823     return true;
824 
825   if (POINTER_TYPE_P (parmtype) && POINTER_TYPE_P (argtype))
826     {
827       parmtype = TREE_TYPE (parmtype);
828       argtype = TREE_TYPE (argtype);
829       if (TYPE_READONLY (argtype))
830 	parmtype = build_qualified_type (parmtype, TYPE_QUAL_CONST);
831     }
832 
833   return lang_hooks.types_compatible_p (parmtype, argtype);
834 }
835 
836 /* In addition to calling fold_convert for EXPR of type TYPE, also
837    call c_fully_fold to remove any C_MAYBE_CONST_EXPRs that could be
838    hiding there (PR47197).  */
839 
840 static tree
841 fully_fold_convert (tree type, tree expr)
842 {
843   tree result = fold_convert (type, expr);
844   bool maybe_const = true;
845 
846   if (!c_dialect_cxx ())
847     result = c_fully_fold (result, false, &maybe_const);
848 
849   return result;
850 }
851 
852 /* Build a tree for a function call to an Altivec non-overloaded builtin.
853    The overloaded builtin that matched the types and args is described
854    by DESC.  The N arguments are given in ARGS, respectively.
855 
856    Actually the only thing it does is calling fold_convert on ARGS, with
857    a small exception for vec_{all,any}_{ge,le} predicates. */
858 
859 static tree
860 altivec_build_resolved_builtin (tree *args, int n, tree fntype, tree ret_type,
861 				rs6000_gen_builtins bif_id,
862 				rs6000_gen_builtins ovld_id)
863 {
864   tree argtypes = TYPE_ARG_TYPES (fntype);
865   tree arg_type[MAX_OVLD_ARGS];
866   tree fndecl = rs6000_builtin_decls[bif_id];
867 
868   for (int i = 0; i < n; i++)
869     {
870       arg_type[i] = TREE_VALUE (argtypes);
871       argtypes = TREE_CHAIN (argtypes);
872     }
873 
874   /* The AltiVec overloading implementation is overall gross, but this
875      is particularly disgusting.  The vec_{all,any}_{ge,le} builtins
876      are completely different for floating-point vs. integer vector
877      types, because the former has vcmpgefp, but the latter should use
878      vcmpgtXX.
879 
880      In practice, the second and third arguments are swapped, and the
881      condition (LT vs. EQ, which is recognizable by bit 1 of the first
882      argument) is reversed.  Patch the arguments here before building
883      the resolved CALL_EXPR.  */
884   if (n == 3
885       && ovld_id == RS6000_OVLD_VEC_CMPGE_P
886       && bif_id != RS6000_BIF_VCMPGEFP_P
887       && bif_id != RS6000_BIF_XVCMPGEDP_P)
888     {
889       std::swap (args[1], args[2]);
890       std::swap (arg_type[1], arg_type[2]);
891 
892       args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
893 			     build_int_cst (NULL_TREE, 2));
894     }
895 
896   for (int j = 0; j < n; j++)
897     args[j] = fully_fold_convert (arg_type[j], args[j]);
898 
899   /* If the number of arguments to an overloaded function increases,
900      we must expand this switch.  */
901   gcc_assert (MAX_OVLD_ARGS <= 4);
902 
903   tree call;
904   switch (n)
905     {
906     case 0:
907       call = build_call_expr (fndecl, 0);
908       break;
909     case 1:
910       call = build_call_expr (fndecl, 1, args[0]);
911       break;
912     case 2:
913       call = build_call_expr (fndecl, 2, args[0], args[1]);
914       break;
915     case 3:
916       call = build_call_expr (fndecl, 3, args[0], args[1], args[2]);
917       break;
918     case 4:
919       call = build_call_expr (fndecl, 4, args[0], args[1], args[2], args[3]);
920       break;
921     default:
922       gcc_unreachable ();
923     }
924   return fold_convert (ret_type, call);
925 }
926 
927 /* Enumeration of possible results from attempted overload resolution.
928    This is used by special-case helper functions to tell their caller
929    whether they succeeded and what still needs to be done.
930 
931 	unresolved = Still needs processing
932 	  resolved = Resolved (but may be an error_mark_node)
933       resolved_bad = An error that needs handling by the caller.  */
934 
935 enum resolution { unresolved, resolved, resolved_bad };
936 
937 /* Resolve an overloaded vec_mul call and return a tree expression for the
938    resolved call if successful.  ARGS contains the arguments to the call.
939    TYPES contains their types.  RES must be set to indicate the status of
940    the resolution attempt.  LOC contains statement location information.  */
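/* For example (illustrative), vec_mul (va, vb) on "vector float"
   operands is resolved below to a direct call of the XVMULSP builtin,
   while on integer element types it simply becomes the expression
   va * vb (a MULT_EXPR), since there is no dedicated instruction.  */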
941 
942 static tree
943 resolve_vec_mul (resolution *res, tree *args, tree *types, location_t loc)
944 {
945   /* vec_mul needs to be special cased because there are no instructions for it
946      for the {un}signed char, {un}signed short, and {un}signed int types.  */
947 
948   /* Both arguments must be vectors and the types must be compatible.  */
949   if (TREE_CODE (types[0]) != VECTOR_TYPE
950       || !lang_hooks.types_compatible_p (types[0], types[1]))
951     {
952       *res = resolved_bad;
953       return error_mark_node;
954     }
955 
956   switch (TYPE_MODE (TREE_TYPE (types[0])))
957     {
958     case E_QImode:
959     case E_HImode:
960     case E_SImode:
961     case E_DImode:
962     case E_TImode:
963       /* For scalar types just use a multiply expression.  */
964       *res = resolved;
965       return fold_build2_loc (loc, MULT_EXPR, types[0], args[0],
966 			      fold_convert (types[0], args[1]));
967     case E_SFmode:
968       {
969 	/* For floats use the xvmulsp instruction directly.  */
970 	*res = resolved;
971 	tree call = rs6000_builtin_decls[RS6000_BIF_XVMULSP];
972 	return build_call_expr (call, 2, args[0], args[1]);
973       }
974     case E_DFmode:
975       {
976 	/* For doubles use the xvmuldp instruction directly.  */
977 	*res = resolved;
978 	tree call = rs6000_builtin_decls[RS6000_BIF_XVMULDP];
979 	return build_call_expr (call, 2, args[0], args[1]);
980       }
981     /* Other types are errors.  */
982     default:
983       *res = resolved_bad;
984       return error_mark_node;
985     }
986 }
987 
988 /* Resolve an overloaded vec_cmpne call and return a tree expression for the
989    resolved call if successful.  ARGS contains the arguments to the call.
990    TYPES contains their types.  RES must be set to indicate the status of
991    the resolution attempt.  LOC contains statement location information.  */
992 
993 static tree
994 resolve_vec_cmpne (resolution *res, tree *args, tree *types, location_t loc)
995 {
996   /* vec_cmpne needs to be special cased because there are no instructions
997      for it (prior to power 9).  */
998 
999   /* Both arguments must be vectors and the types must be compatible.  */
1000   if (TREE_CODE (types[0]) != VECTOR_TYPE
1001       || !lang_hooks.types_compatible_p (types[0], types[1]))
1002     {
1003       *res = resolved_bad;
1004       return error_mark_node;
1005     }
1006 
1007   machine_mode arg0_elt_mode = TYPE_MODE (TREE_TYPE (types[0]));
1008 
1009   /* Power9 instructions provide the most efficient implementation of
1010      ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode
1011      or SFmode or DFmode.  */
1012   if (!TARGET_P9_VECTOR
1013       || arg0_elt_mode == DImode
1014       || arg0_elt_mode == TImode
1015       || arg0_elt_mode == SFmode
1016       || arg0_elt_mode == DFmode)
1017     {
1018       switch (arg0_elt_mode)
1019 	{
1020 	  /* vec_cmpneq (va, vb) == vec_nor (vec_cmpeq (va, vb),
1021 					     vec_cmpeq (va, vb)).  */
1022 	  /* Note:  vec_nand also works but opt changes vec_nand's
1023 	     to vec_nor's anyway.  */
1024 	case E_QImode:
1025 	case E_HImode:
1026 	case E_SImode:
1027 	case E_DImode:
1028 	case E_TImode:
1029 	case E_SFmode:
1030 	case E_DFmode:
1031 	  {
1032 	    /* call = vec_cmpeq (va, vb)
1033 	       result = vec_nor (call, call).  */
1034 	    vec<tree, va_gc> *params = make_tree_vector ();
1035 	    vec_safe_push (params, args[0]);
1036 	    vec_safe_push (params, args[1]);
1037 	    tree decl = rs6000_builtin_decls[RS6000_OVLD_VEC_CMPEQ];
1038 	    tree call = altivec_resolve_overloaded_builtin (loc, decl, params);
1039 	    /* Use save_expr to ensure that operands used more than once
1040 	       that may have side effects (like calls) are only evaluated
1041 	       once.  */
1042 	    call = save_expr (call);
1043 	    params = make_tree_vector ();
1044 	    vec_safe_push (params, call);
1045 	    vec_safe_push (params, call);
1046 	    decl = rs6000_builtin_decls[RS6000_OVLD_VEC_NOR];
1047 	    *res = resolved;
1048 	    return altivec_resolve_overloaded_builtin (loc, decl, params);
1049 	  }
1050 	  /* Other types are errors.  */
1051 	default:
1052 	  *res = resolved_bad;
1053 	  return error_mark_node;
1054 	}
1055     }
1056 
1057   /* Otherwise this call is unresolved, and altivec_resolve_overloaded_builtin
1058      will later process the Power9 alternative.  */
1059   *res = unresolved;
1060   return error_mark_node;
1061 }
1062 
1063 /* Resolve an overloaded vec_adde or vec_sube call and return a tree expression
1064    for the resolved call if successful.  ARGS contains the arguments to the
1065    call.  TYPES contains their types.  RES must be set to indicate the
1066    status of the resolution attempt.  LOC contains statement location
1067    information.  */
1068 
1069 static tree
1070 resolve_vec_adde_sube (resolution *res, rs6000_gen_builtins fcode,
1071 		       tree *args, tree *types, location_t loc)
1072 {
1073   /* vec_adde needs to be special cased because there is no instruction
1074      for the {un}signed int version.  */
1075 
1076   /* All 3 arguments must be vectors of (signed or unsigned) (int or
1077      __int128) and the types must be compatible.  */
1078   if (TREE_CODE (types[0]) != VECTOR_TYPE
1079       || !lang_hooks.types_compatible_p (types[0], types[1])
1080       || !lang_hooks.types_compatible_p (types[1], types[2]))
1081     {
1082       *res = resolved_bad;
1083       return error_mark_node;
1084     }
1085 
1086   switch (TYPE_MODE (TREE_TYPE (types[0])))
1087     {
1088       /* For {un}signed ints,
1089 	 vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
1090 					       vec_and (carryv, 1)).
1091 	 vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
1092 					       vec_and (carryv, 1)).  */
1093     case E_SImode:
1094       {
1095 	vec<tree, va_gc> *params = make_tree_vector ();
1096 	vec_safe_push (params, args[0]);
1097 	vec_safe_push (params, args[1]);
1098 
1099 	tree add_sub_builtin;
1100 	if (fcode == RS6000_OVLD_VEC_ADDE)
1101 	  add_sub_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADD];
1102 	else
1103 	  add_sub_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUB];
1104 
1105 	tree call = altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
1106 							params);
1107 	tree const1 = build_int_cstu (TREE_TYPE (types[0]), 1);
1108 	tree ones_vector = build_vector_from_val (types[0], const1);
1109 	tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, types[0],
1110 					 args[2], ones_vector);
1111 	params = make_tree_vector ();
1112 	vec_safe_push (params, call);
1113 	vec_safe_push (params, and_expr);
1114 	*res = resolved;
1115 	return altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
1116 						   params);
1117       }
1118       /* For {un}signed __int128s use the vaddeuqm/vsubeuqm instruction
1119 	 directly using the standard machinery.  */
1120     case E_TImode:
1121       *res = unresolved;
1122       break;
1123 
1124       /* Types other than {un}signed int and {un}signed __int128
1125 	 are errors.  */
1126     default:
1127       *res = resolved_bad;
1128     }
1129 
1130   return error_mark_node;
1131 }
1132 
1133 /* Resolve an overloaded vec_addec or vec_subec call and return a tree
1134    expression for the resolved call if successful.  ARGS contains the arguments
1135    to the call.  TYPES contains their types.  RES must be set to indicate the
1136    status of the resolution attempt.  LOC contains statement location
1137    information.  */
1138 
1139 static tree
1140 resolve_vec_addec_subec (resolution *res, rs6000_gen_builtins fcode,
1141 			 tree *args, tree *types, location_t loc)
1142 {
1143   /* vec_addec and vec_subec need to be special cased because there is
1144      no instruction for the (un)signed int version.  */
1145 
1146   /* All 3 arguments must be vectors of (signed or unsigned) (int or
1147      __int128) and the types must be compatible.  */
1148   if (TREE_CODE (types[0]) != VECTOR_TYPE
1149       || !lang_hooks.types_compatible_p (types[0], types[1])
1150       || !lang_hooks.types_compatible_p (types[1], types[2]))
1151     {
1152       *res = resolved_bad;
1153       return error_mark_node;
1154     }
1155 
1156   switch (TYPE_MODE (TREE_TYPE (types[0])))
1157     {
1158       /* For {un}signed ints,
1159 	   vec_addec (va, vb, carryv) ==
1160 	     vec_or (vec_addc (va, vb),
1161 		     vec_addc (vec_add (va, vb),
1162 			       vec_and (carryv, 0x1))).  */
1163     case E_SImode:
1164       {
1165 	/* Use save_expr to ensure that operands used more than once that may
1166 	   have side effects (like calls) are only evaluated once.  */
1167 	args[0] = save_expr (args[0]);
1168 	args[1] = save_expr (args[1]);
1169 	vec<tree, va_gc> *params = make_tree_vector ();
1170 	vec_safe_push (params, args[0]);
1171 	vec_safe_push (params, args[1]);
1172 
1173 	tree as_c_builtin;
1174 	if (fcode == RS6000_OVLD_VEC_ADDEC)
1175 	  as_c_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADDC];
1176 	else
1177 	  as_c_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUBC];
1178 
1179 	tree call1 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
1180 							 params);
1181 	params = make_tree_vector ();
1182 	vec_safe_push (params, args[0]);
1183 	vec_safe_push (params, args[1]);
1184 
1185 	tree as_builtin;
1186 	if (fcode == RS6000_OVLD_VEC_ADDEC)
1187 	  as_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADD];
1188 	else
1189 	  as_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUB];
1190 
1191 	tree call2 = altivec_resolve_overloaded_builtin (loc, as_builtin,
1192 							 params);
1193 	tree const1 = build_int_cstu (TREE_TYPE (types[0]), 1);
1194 	tree ones_vector = build_vector_from_val (types[0], const1);
1195 	tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, types[0],
1196 					 args[2], ones_vector);
1197 	params = make_tree_vector ();
1198 	vec_safe_push (params, call2);
1199 	vec_safe_push (params, and_expr);
1200 	call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin, params);
1201 	params = make_tree_vector ();
1202 	vec_safe_push (params, call1);
1203 	vec_safe_push (params, call2);
1204 	tree or_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_OR];
1205 	*res = resolved;
1206 	return altivec_resolve_overloaded_builtin (loc, or_builtin, params);
1207       }
1208       /* For {un}signed __int128s use the vaddecuq/vsubecuq
1209 	 instructions.  This occurs through normal processing.  */
1210     case E_TImode:
1211       *res = unresolved;
1212       break;
1213 
1214       /* Types other than {un}signed int and {un}signed __int128
1215 	 are errors.  */
1216     default:
1217       *res = resolved_bad;
1218     }
1219 
1220   return error_mark_node;
1221 }
1222 
1223 /* Resolve an overloaded vec_splats or vec_promote call and return a tree
1224    expression for the resolved call if successful.  NARGS is the number of
1225    arguments to the call.  ARGLIST contains the arguments.  RES must be set
1226    to indicate the status of the resolution attempt.  */
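/* For example (illustrative), vec_splats ((int) 5) is resolved here to
   the vector constructor (vector signed int){5, 5, 5, 5}, and
   vec_promote (1.0f, 2) likewise to a constructor that replicates the
   value into every lane (the element-position argument of vec_promote
   is only checked for being integral and otherwise ignored).  */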
1227 
1228 static tree
1229 resolve_vec_splats (resolution *res, rs6000_gen_builtins fcode,
1230 		    vec<tree, va_gc> *arglist, unsigned nargs)
1231 {
1232   const char *name;
1233   name = fcode == RS6000_OVLD_VEC_SPLATS ? "vec_splats" : "vec_promote";
1234 
1235   if (fcode == RS6000_OVLD_VEC_SPLATS && nargs != 1)
1236     {
1237       error ("builtin %qs only accepts 1 argument", name);
1238       *res = resolved;
1239       return error_mark_node;
1240     }
1241 
1242   if (fcode == RS6000_OVLD_VEC_PROMOTE && nargs != 2)
1243     {
1244       error ("builtin %qs only accepts 2 arguments", name);
1245       *res = resolved;
1246       return error_mark_node;
1247     }
1248 
1249   /* Ignore promote's element argument.  */
1250   if (fcode == RS6000_OVLD_VEC_PROMOTE
1251       && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
1252     {
1253       *res = resolved_bad;
1254       return error_mark_node;
1255     }
1256 
1257   tree arg = (*arglist)[0];
1258   tree type = TREE_TYPE (arg);
1259 
1260   if (!SCALAR_FLOAT_TYPE_P (type) && !INTEGRAL_TYPE_P (type))
1261     {
1262       *res = resolved_bad;
1263       return error_mark_node;
1264     }
1265 
1266   bool unsigned_p = TYPE_UNSIGNED (type);
1267   int size;
1268 
1269   switch (TYPE_MODE (type))
1270     {
1271     case E_TImode:
1272       type = unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node;
1273       size = 1;
1274       break;
1275     case E_DImode:
1276       type = unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node;
1277       size = 2;
1278       break;
1279     case E_SImode:
1280       type = unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node;
1281       size = 4;
1282       break;
1283     case E_HImode:
1284       type = unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node;
1285       size = 8;
1286       break;
1287     case E_QImode:
1288       type = unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node;
1289       size = 16;
1290       break;
1291     case E_SFmode:
1292       type = V4SF_type_node;
1293       size = 4;
1294       break;
1295     case E_DFmode:
1296       type = V2DF_type_node;
1297       size = 2;
1298       break;
1299     default:
1300       *res = resolved_bad;
1301       return error_mark_node;
1302     }
1303 
1304   arg = save_expr (fold_convert (TREE_TYPE (type), arg));
1305   vec<constructor_elt, va_gc> *vec;
1306   vec_alloc (vec, size);
1307 
1308   for (int i = 0; i < size; i++)
1309     {
1310       constructor_elt elt = {NULL_TREE, arg};
1311       vec->quick_push (elt);
1312     }
1313 
1314   *res = resolved;
1315   return build_constructor (type, vec);
1316 }
1317 
1318 /* Resolve an overloaded vec_extract call and return a tree expression for
1319    the resolved call if successful.  NARGS is the number of arguments to
1320    the call.  ARGLIST contains the arguments.  RES must be set to indicate
1321    the status of the resolution attempt.  LOC contains statement location
1322    information.  */
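/* For example (illustrative), with VSX and 64-bit direct moves, a
   constant-index vec_extract (v, 5) on "vector signed int" reduces the
   index modulo the number of elements (5 % 4 == 1) and calls the
   VEC_EXT_V4SI builtin; otherwise the vector is spilled to a stack
   temporary and the element is loaded through a pointer, as built near
   the end of this function.  */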
1323 
1324 static tree
1325 resolve_vec_extract (resolution *res, vec<tree, va_gc> *arglist,
1326 		     unsigned nargs, location_t loc)
1327 {
1328   if (nargs != 2)
1329     {
1330       error ("builtin %qs only accepts 2 arguments", "vec_extract");
1331       *res = resolved;
1332       return error_mark_node;
1333     }
1334 
1335   tree arg1 = (*arglist)[0];
1336   tree arg1_type = TREE_TYPE (arg1);
1337   tree arg2 = (*arglist)[1];
1338 
1339   if (TREE_CODE (arg1_type) != VECTOR_TYPE
1340       || !INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
1341     {
1342       *res = resolved_bad;
1343       return error_mark_node;
1344     }
1345 
1346   /* See if we can optimize vec_extract with the current VSX instruction
1347      set.  */
1348   machine_mode mode = TYPE_MODE (arg1_type);
1349   tree arg1_inner_type;
1350 
1351   if (VECTOR_MEM_VSX_P (mode))
1352     {
1353       tree call = NULL_TREE;
1354       int nunits = GET_MODE_NUNITS (mode);
1355       arg2 = fold_for_warn (arg2);
1356 
1357       /* If the second argument is an integer constant, generate
1358 	 the built-in code if we can.  We need 64-bit and direct
1359 	 move to extract the small integer vectors.  */
1360       if (TREE_CODE (arg2) == INTEGER_CST)
1361 	{
1362 	  wide_int selector = wi::to_wide (arg2);
1363 	  selector = wi::umod_trunc (selector, nunits);
1364 	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1365 	  switch (mode)
1366 	    {
1367 	    case E_V1TImode:
1368 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V1TI];
1369 	      break;
1370 
1371 	    case E_V2DFmode:
1372 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DF];
1373 	      break;
1374 
1375 	    case E_V2DImode:
1376 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DI];
1377 	      break;
1378 
1379 	    case E_V4SFmode:
1380 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SF];
1381 	      break;
1382 
1383 	    case E_V4SImode:
1384 	      if (TARGET_DIRECT_MOVE_64BIT)
1385 		call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SI];
1386 	      break;
1387 
1388 	    case E_V8HImode:
1389 	      if (TARGET_DIRECT_MOVE_64BIT)
1390 		call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V8HI];
1391 	      break;
1392 
1393 	    case E_V16QImode:
1394 	      if (TARGET_DIRECT_MOVE_64BIT)
1395 		call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V16QI];
1396 	      break;
1397 
1398 	    default:
1399 	      break;
1400 	    }
1401 	}
1402 
1403       /* If the second argument is variable, we can optimize it if we are
1404 	 generating 64-bit code on a machine with direct move.  */
1405       else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT)
1406 	{
1407 	  switch (mode)
1408 	    {
1409 	    case E_V2DFmode:
1410 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DF];
1411 	      break;
1412 
1413 	    case E_V2DImode:
1414 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DI];
1415 	      break;
1416 
1417 	    case E_V4SFmode:
1418 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SF];
1419 	      break;
1420 
1421 	    case E_V4SImode:
1422 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SI];
1423 	      break;
1424 
1425 	    case E_V8HImode:
1426 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V8HI];
1427 	      break;
1428 
1429 	    case E_V16QImode:
1430 	      call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V16QI];
1431 	      break;
1432 
1433 	    default:
1434 	      break;
1435 	    }
1436 	}
1437 
1438       if (call)
1439 	{
1440 	  tree result = build_call_expr (call, 2, arg1, arg2);
1441 	  /* Coerce the result to vector element type.  May be no-op.  */
1442 	  arg1_inner_type = TREE_TYPE (arg1_type);
1443 	  result = fold_convert (arg1_inner_type, result);
1444 	  *res = resolved;
1445 	  return result;
1446 	}
1447     }
1448 
1449   /* Build *(((arg1_inner_type*) & (vector type){arg1}) + arg2). */
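  /* A concrete sketch of that expression, assuming a vector int ARG1 and
     an integer index ARG2 (illustrative names only):
       *(((int *) &(vector int){ARG1}) + (ARG2 & 3))
     The index is masked below so that it stays within the vector.  */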
1450   arg1_inner_type = TREE_TYPE (arg1_type);
1451   tree subp = build_int_cst (TREE_TYPE (arg2),
1452 			     TYPE_VECTOR_SUBPARTS (arg1_type) - 1);
1453   arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, subp, 0);
1454 
1455   tree decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
1456   DECL_EXTERNAL (decl) = 0;
1457   TREE_PUBLIC (decl) = 0;
1458   DECL_CONTEXT (decl) = current_function_decl;
1459   TREE_USED (decl) = 1;
1460   TREE_TYPE (decl) = arg1_type;
1461   TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
1462 
1463   tree stmt;
1464   if (c_dialect_cxx ())
1465     {
1466       stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1, NULL_TREE, NULL_TREE);
1467       SET_EXPR_LOCATION (stmt, loc);
1468     }
1469   else
1470     {
1471       DECL_INITIAL (decl) = arg1;
1472       stmt = build1 (DECL_EXPR, arg1_type, decl);
1473       TREE_ADDRESSABLE (decl) = 1;
1474       SET_EXPR_LOCATION (stmt, loc);
1475       stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
1476     }
1477 
1478   tree innerptrtype = build_pointer_type (arg1_inner_type);
1479   stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
1480   stmt = convert (innerptrtype, stmt);
1481   stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
1482   stmt = build_indirect_ref (loc, stmt, RO_NULL);
1483 
1484   /* PR83660: We mark this as having side effects so that downstream in
1485      fold_build_cleanup_point_expr () it will get a CLEANUP_POINT_EXPR.  If it
1486      does not, we can run into an ICE later in gimplify_cleanup_point_expr ().
1487      Potentially this causes missed optimization because there actually is no
1488      side effect.  */
1489   if (c_dialect_cxx ())
1490     TREE_SIDE_EFFECTS (stmt) = 1;
1491 
1492   *res = resolved;
1493   return stmt;
1494 }
1495 
1496 /* Resolve an overloaded vec_insert call and return a tree expression for
1497    the resolved call if successful.  NARGS is the number of arguments to
1498    the call.  ARGLIST contains the arguments.  RES must be set to indicate
1499    the status of the resolution attempt.  LOC contains statement location
1500    information.  */
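/* As an illustrative sketch (hypothetical names), a source-level call
   such as

     vector double v;
     v = vec_insert (d, v, n);

   is resolved either to the xxpermdi-based built-in selected below when
   the element number is constant and VSX handles the mode, or to a store
   into a temporary copy of the vector that is then returned.  */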
1501 
1502 static tree
1503 resolve_vec_insert (resolution *res, vec<tree, va_gc> *arglist,
1504 		    unsigned nargs, location_t loc)
1505 {
1506   if (nargs != 3)
1507     {
1508       error ("builtin %qs only accepts 3 arguments", "vec_insert");
1509       *res = resolved;
1510       return error_mark_node;
1511     }
1512 
1513   tree arg0 = (*arglist)[0];
1514   tree arg1 = (*arglist)[1];
1515   tree arg1_type = TREE_TYPE (arg1);
1516   tree arg2 = fold_for_warn ((*arglist)[2]);
1517 
1518   if (TREE_CODE (arg1_type) != VECTOR_TYPE
1519       || !INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
1520     {
1521       *res = resolved_bad;
1522       return error_mark_node;
1523     }
1524 
1525   /* If we can use the VSX xxpermdi instruction, use that for insert.  */
1526   machine_mode mode = TYPE_MODE (arg1_type);
1527 
1528   if ((mode == V2DFmode || mode == V2DImode)
1529       && VECTOR_UNIT_VSX_P (mode)
1530       && TREE_CODE (arg2) == INTEGER_CST)
1531     {
1532       wide_int selector = wi::to_wide (arg2);
1533       selector = wi::umod_trunc (selector, 2);
1534       arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1535 
1536       tree call = NULL_TREE;
1537       if (mode == V2DFmode)
1538 	call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V2DF];
1539       else if (mode == V2DImode)
1540 	call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V2DI];
1541 
1542       /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
1543 	 reversed.  */
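      /* Illustrative sketch: a user-level vec_insert (val, vec, idx) is
	 therefore emitted below as a call with operands (vec, val, idx);
	 the names are placeholders, not identifiers from this file.  */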
1544       if (call)
1545 	{
1546 	  *res = resolved;
1547 	  return build_call_expr (call, 3, arg1, arg0, arg2);
1548 	}
1549     }
1550 
1551   else if (mode == V1TImode
1552 	   && VECTOR_UNIT_VSX_P (mode)
1553 	   && TREE_CODE (arg2) == INTEGER_CST)
1554     {
1555       tree call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V1TI];
1556       wide_int selector = wi::zero (32);
1557       arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1558 
1559       /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
1560 	 reversed.  */
1561       *res = resolved;
1562       return build_call_expr (call, 3, arg1, arg0, arg2);
1563     }
1564 
1565   /* Build *(((arg1_inner_type*) & (vector type){arg1}) + arg2) = arg0 with
1566      VIEW_CONVERT_EXPR.  i.e.:
1567        D.3192 = v1;
1568        _1 = n & 3;
1569        VIEW_CONVERT_EXPR<int[4]>(D.3192)[_1] = i;
1570        v1 = D.3192;
1571        D.3194 = v1;  */
1572   if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1)
1573     arg2 = build_int_cst (TREE_TYPE (arg2), 0);
1574   else
1575     {
1576       tree c = build_int_cst (TREE_TYPE (arg2),
1577 			      TYPE_VECTOR_SUBPARTS (arg1_type) - 1);
1578       arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, c, 0);
1579     }
1580 
1581   tree decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
1582   DECL_EXTERNAL (decl) = 0;
1583   TREE_PUBLIC (decl) = 0;
1584   DECL_CONTEXT (decl) = current_function_decl;
1585   TREE_USED (decl) = 1;
1586   TREE_TYPE (decl) = arg1_type;
1587   TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
1588   TREE_ADDRESSABLE (decl) = 1;
1589 
1590   tree stmt;
1591   if (c_dialect_cxx ())
1592     {
1593       stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1, NULL_TREE, NULL_TREE);
1594       SET_EXPR_LOCATION (stmt, loc);
1595     }
1596   else
1597     {
1598       DECL_INITIAL (decl) = arg1;
1599       stmt = build1 (DECL_EXPR, arg1_type, decl);
1600       SET_EXPR_LOCATION (stmt, loc);
1601       stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
1602     }
1603 
1604   if (TARGET_VSX)
1605     {
1606       stmt = build_array_ref (loc, stmt, arg2);
1607       stmt = fold_build2 (MODIFY_EXPR, TREE_TYPE (arg0), stmt,
1608 			  convert (TREE_TYPE (stmt), arg0));
1609       stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
1610     }
1611   else
1612     {
1613       tree arg1_inner_type = TREE_TYPE (arg1_type);
1614       tree innerptrtype = build_pointer_type (arg1_inner_type);
1615       stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
1616       stmt = convert (innerptrtype, stmt);
1617       stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
1618       stmt = build_indirect_ref (loc, stmt, RO_NULL);
1619       stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
1620 		     convert (TREE_TYPE (stmt), arg0));
1621       stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
1622     }
1623 
1624   *res = resolved;
1625   return stmt;
1626 }
1627 
1628 /* Resolve an overloaded vec_step call and return a tree expression for
1629    the resolved call if successful.  NARGS is the number of arguments to
1630    the call.  ARGLIST contains the arguments.  RES must be set to indicate
1631    the status of the resolution attempt.  */
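/* For example (a sketch, not code from this file): vec_step applied to
   an argument of type vector signed int resolves to the integer
   constant 4, the number of elements in the vector.  */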
1632 
1633 static tree
1634 resolve_vec_step (resolution *res, vec<tree, va_gc> *arglist, unsigned nargs)
1635 {
1636   if (nargs != 1)
1637     {
1638       error ("builtin %qs only accepts 1 argument", "vec_step");
1639       *res = resolved;
1640       return error_mark_node;
1641     }
1642 
1643   tree arg0 = (*arglist)[0];
1644   tree arg0_type = TREE_TYPE (arg0);
1645 
1646   if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1647     {
1648       *res = resolved_bad;
1649       return error_mark_node;
1650     }
1651 
1652   *res = resolved;
1653   return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (arg0_type));
1654 }
1655 
1656 /* Look for a matching instance in a chain of instances.  INSTANCE points to
1657    the chain of instances; INSTANCE_CODE is the code identifying the specific
1658    built-in being searched for; FCODE is the overloaded function code; TYPES
1659    contains an array of two types that must match the types of the instance's
1660    parameters; and ARGS contains an array of two arguments to be passed to
1661    the instance.  If found, resolve the built-in and return it, unless the
1662    built-in is not supported in context.  In that case, set
1663    UNSUPPORTED_BUILTIN to true.  If we don't match, return error_mark_node
1664    and leave UNSUPPORTED_BUILTIN alone.  */
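/* A sketch of the intended use (compare the RS6000_OVLD_SCAL_CMPB case
   below): the caller first selects an instance code such as
   RS6000_BIF_CMPB or RS6000_BIF_CMPB_32 from the argument modes and then
   calls

     tree call = find_instance (&unsupported_builtin, &instance,
				instance_code, fcode, types, args);

   using the result unless it is error_mark_node.  */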
1665 
1666 static tree
1667 find_instance (bool *unsupported_builtin, int *instance,
1668 	       rs6000_gen_builtins instance_code,
1669 	       rs6000_gen_builtins fcode,
1670 	       tree *types, tree *args)
1671 {
1672   while (*instance != -1
1673 	 && rs6000_instance_info[*instance].bifid != instance_code)
1674     *instance = rs6000_instance_info[*instance].next;
1675 
1676   int inst = *instance;
1677   gcc_assert (inst != -1);
1678   /* It is possible for an instance to require a data type that isn't
1679      defined on this target, in which case rs6000_instance_info_fntype[inst]
1680      will be NULL.  */
1681   if (!rs6000_instance_info_fntype[inst])
1682     return error_mark_node;
1683   rs6000_gen_builtins bifid = rs6000_instance_info[inst].bifid;
1684   tree fntype = rs6000_builtin_info_fntype[bifid];
1685   tree parmtype0 = TREE_VALUE (TYPE_ARG_TYPES (fntype));
1686   tree parmtype1 = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (fntype)));
1687 
1688   if (rs6000_builtin_type_compatible (types[0], parmtype0)
1689       && rs6000_builtin_type_compatible (types[1], parmtype1))
1690     {
1691       if (rs6000_builtin_decl (bifid, false) != error_mark_node
1692 	  && rs6000_builtin_is_supported (bifid))
1693 	{
1694 	  tree ret_type = TREE_TYPE (rs6000_instance_info_fntype[inst]);
1695 	  return altivec_build_resolved_builtin (args, 2, fntype, ret_type,
1696 						 bifid, fcode);
1697 	}
1698       else
1699 	*unsupported_builtin = true;
1700     }
1701 
1702   return error_mark_node;
1703 }
1704 
1705 /* Implementation of the resolve_overloaded_builtin target hook, to
1706    support Altivec's overloaded builtins.  */
1707 
1708 tree
1709 altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
1710 				    void *passed_arglist)
1711 {
1712   rs6000_gen_builtins fcode
1713     = (rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
1714 
1715   /* Return immediately if this isn't an overload.  */
1716   if (fcode <= RS6000_OVLD_NONE)
1717     return NULL_TREE;
1718 
1719   if (TARGET_DEBUG_BUILTIN)
1720     fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n",
1721 	     (int) fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
1722 
1723   /* vec_lvsl and vec_lvsr are deprecated for use with LE element order.  */
1724   if (fcode == RS6000_OVLD_VEC_LVSL && !BYTES_BIG_ENDIAN)
1725     warning (OPT_Wdeprecated,
1726 	     "%<vec_lvsl%> is deprecated for little endian; use "
1727 	     "assignment for unaligned loads and stores");
1728   else if (fcode == RS6000_OVLD_VEC_LVSR && !BYTES_BIG_ENDIAN)
1729     warning (OPT_Wdeprecated,
1730 	     "%<vec_lvsr%> is deprecated for little endian; use "
1731 	     "assignment for unaligned loads and stores");
1732 
1733   /* Gather the arguments and their types into arrays for easier handling.  */
1734   tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
1735   tree types[MAX_OVLD_ARGS];
1736   tree args[MAX_OVLD_ARGS];
1737   unsigned int n;
1738 
1739   /* Count the number of expected arguments.  */
1740   unsigned expected_args = 0;
1741   for (tree chain = fnargs;
1742        chain && !VOID_TYPE_P (TREE_VALUE (chain));
1743        chain = TREE_CHAIN (chain))
1744     expected_args++;
1745 
1746   vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
1747   unsigned int nargs = vec_safe_length (arglist);
1748 
1749   /* If the number of arguments did not match the prototype, return NULL
1750      and the generic code will issue the appropriate error message.  Skip
1751      this test for functions where we don't fully describe all the possible
1752      overload signatures in rs6000-overload.def (because they aren't relevant
1753      to the expansion here).  If we don't skip it, we get confusing error messages.  */
1754   /* As an example, for vec_splats we have:
1755 
1756 ; There are no actual builtins for vec_splats.  There is special handling for
1757 ; this in altivec_resolve_overloaded_builtin in rs6000-c.cc, where the call
1758 ; is replaced by a constructor.  The single overload here causes
1759 ; __builtin_vec_splats to be registered with the front end so that can happen.
1760 [VEC_SPLATS, vec_splats, __builtin_vec_splats]
1761   vsi __builtin_vec_splats (vsi);
1762     ABS_V4SI SPLATS_FAKERY
1763 
1764     So even though __builtin_vec_splats accepts all vector types, the
1765     infrastructure cheats and just records one prototype.  We end up getting
1766     an error message that refers to this specific prototype even when we
1767     are handling a different argument type.  That is completely confusing
1768     to the user, so it's best to let these cases be handled individually
1769     in the resolve_vec_splats, etc., helper functions.  */
1770 
1771   if (expected_args != nargs
1772       && !(fcode == RS6000_OVLD_VEC_PROMOTE
1773 	   || fcode == RS6000_OVLD_VEC_SPLATS
1774 	   || fcode == RS6000_OVLD_VEC_EXTRACT
1775 	   || fcode == RS6000_OVLD_VEC_INSERT
1776 	   || fcode == RS6000_OVLD_VEC_STEP))
1777     return NULL;
1778 
1779   for (n = 0;
1780        !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
1781        fnargs = TREE_CHAIN (fnargs), n++)
1782     {
1783       tree decl_type = TREE_VALUE (fnargs);
1784       tree arg = (*arglist)[n];
1785 
1786       if (arg == error_mark_node)
1787 	return error_mark_node;
1788 
1789       if (n >= MAX_OVLD_ARGS)
1790 	abort ();
1791 
1792       arg = default_conversion (arg);
1793       tree type = TREE_TYPE (arg);
1794 
1795       /* The C++ front-end converts float * to const void * using
1796 	 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)).  */
1797       if (POINTER_TYPE_P (type)
1798 	  && TREE_CODE (arg) == NOP_EXPR
1799 	  && lang_hooks.types_compatible_p (TREE_TYPE (arg),
1800 					    const_ptr_type_node)
1801 	  && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
1802 					    ptr_type_node))
1803 	{
1804 	  arg = TREE_OPERAND (arg, 0);
1805 	  type = TREE_TYPE (arg);
1806 	}
1807 
1808       /* Remove the const from the pointers to simplify the overload
1809 	 matching further down.  */
1810       if (POINTER_TYPE_P (decl_type)
1811 	  && POINTER_TYPE_P (type)
1812 	  && TYPE_QUALS (TREE_TYPE (type)) != 0)
1813 	{
1814 	  if (TYPE_READONLY (TREE_TYPE (type))
1815 	      && !TYPE_READONLY (TREE_TYPE (decl_type)))
1816 	    warning (0, "passing argument %d of %qE discards %qs "
1817 		     "qualifier from pointer target type", n + 1, fndecl,
1818 		     "const");
1819 	  type = build_qualified_type (TREE_TYPE (type), 0);
1820 	  type = build_pointer_type (type);
1821 	  arg = fold_convert (type, arg);
1822 	}
1823 
1824       /* For RS6000_OVLD_VEC_LXVL, convert any const * to its non constant
1825 	 equivalent to simplify the overload matching below.  */
1826       if (fcode == RS6000_OVLD_VEC_LXVL
1827 	  && POINTER_TYPE_P (type)
1828 	  && TYPE_READONLY (TREE_TYPE (type)))
1829 	{
1830 	  type = build_qualified_type (TREE_TYPE (type), 0);
1831 	  type = build_pointer_type (type);
1832 	  arg = fold_convert (type, arg);
1833 	}
1834 
1835       args[n] = arg;
1836       types[n] = type;
1837     }
1838 
1839   /* Some overloads require special handling.  */
1840   tree returned_expr = NULL;
1841   resolution res = unresolved;
1842 
1843   if (fcode == RS6000_OVLD_VEC_MUL)
1844     returned_expr = resolve_vec_mul (&res, args, types, loc);
1845   else if (fcode == RS6000_OVLD_VEC_CMPNE)
1846     returned_expr = resolve_vec_cmpne (&res, args, types, loc);
1847   else if (fcode == RS6000_OVLD_VEC_ADDE || fcode == RS6000_OVLD_VEC_SUBE)
1848     returned_expr = resolve_vec_adde_sube (&res, fcode, args, types, loc);
1849   else if (fcode == RS6000_OVLD_VEC_ADDEC || fcode == RS6000_OVLD_VEC_SUBEC)
1850     returned_expr = resolve_vec_addec_subec (&res, fcode, args, types, loc);
1851   else if (fcode == RS6000_OVLD_VEC_SPLATS || fcode == RS6000_OVLD_VEC_PROMOTE)
1852     returned_expr = resolve_vec_splats (&res, fcode, arglist, nargs);
1853   else if (fcode == RS6000_OVLD_VEC_EXTRACT)
1854     returned_expr = resolve_vec_extract (&res, arglist, nargs, loc);
1855   else if (fcode == RS6000_OVLD_VEC_INSERT)
1856     returned_expr = resolve_vec_insert (&res, arglist, nargs, loc);
1857   else if (fcode == RS6000_OVLD_VEC_STEP)
1858     returned_expr = resolve_vec_step (&res, arglist, nargs);
1859 
1860   if (res == resolved)
1861     return returned_expr;
1862 
1863   /* "Regular" built-in functions and overloaded functions share a namespace
1864      for some arrays, like rs6000_builtin_decls.  But rs6000_overload_info
1865      only has information for the overloaded functions, so we need an
1866      adjusted index for that.  */
1867   unsigned int adj_fcode = fcode - RS6000_OVLD_NONE;
1868 
1869   if (res == resolved_bad)
1870     {
1871       const char *name = rs6000_overload_info[adj_fcode].ovld_name;
1872       error ("invalid parameter combination for AltiVec intrinsic %qs", name);
1873       return error_mark_node;
1874     }
1875 
1876   bool unsupported_builtin = false;
1877   rs6000_gen_builtins instance_code;
1878   bool supported = false;
1879   int instance = rs6000_overload_info[adj_fcode].first_instance;
1880   gcc_assert (instance != -1);
1881 
1882   /* Functions with no arguments can have only one overloaded instance.  */
1883   gcc_assert (nargs > 0 || rs6000_instance_info[instance].next == -1);
1884 
1885   /* Standard overload processing involves determining whether an instance
1886      exists that is type-compatible with the overloaded function call.  In
1887      a couple of cases, we need to do some extra processing to disambiguate
1888      between multiple compatible instances.  */
1889   switch (fcode)
1890     {
1891       /* Need to special case __builtin_cmpb because the overloaded forms
1892 	 of this function take (unsigned int, unsigned int) or (unsigned
1893 	 long long int, unsigned long long int).  Since C conventions
1894 	 allow the respective argument types to be implicitly coerced into
1895 	 each other, the default handling does not provide adequate
1896 	 discrimination between the desired forms of the function.  */
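      /* Illustrative example (hypothetical operands): two unsigned int
	 arguments select RS6000_BIF_CMPB_32, whereas an argument of type
	 unsigned long long int, being wider than 32 bits, selects
	 RS6000_BIF_CMPB.  */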
1897     case RS6000_OVLD_SCAL_CMPB:
1898       {
1899 	machine_mode arg1_mode = TYPE_MODE (types[0]);
1900 	machine_mode arg2_mode = TYPE_MODE (types[1]);
1901 
1902 	/* If any supplied arguments are wider than 32 bits, resolve to
1903 	   64-bit variant of built-in function.  */
1904 	if (GET_MODE_PRECISION (arg1_mode) > 32
1905 	    || GET_MODE_PRECISION (arg2_mode) > 32)
1906 	  /* Assure all argument and result types are compatible with
1907 	     the built-in function represented by RS6000_BIF_CMPB.  */
1908 	  instance_code = RS6000_BIF_CMPB;
1909 	else
1910 	  /* Assure all argument and result types are compatible with
1911 	     the built-in function represented by RS6000_BIF_CMPB_32.  */
1912 	  instance_code = RS6000_BIF_CMPB_32;
1913 
1914 	tree call = find_instance (&unsupported_builtin, &instance,
1915 				   instance_code, fcode, types, args);
1916 	if (call != error_mark_node)
1917 	  return call;
1918 	break;
1919       }
1920     case RS6000_OVLD_VEC_VSIE:
1921       {
1922 	machine_mode arg1_mode = TYPE_MODE (types[0]);
1923 
1924 	/* If supplied first argument is wider than 64 bits, resolve to
1925 	   128-bit variant of built-in function.  */
1926 	if (GET_MODE_PRECISION (arg1_mode) > 64)
1927 	  {
1928 	    /* If first argument is of float variety, choose variant
1929 	       that expects __ieee128 argument.  Otherwise, expect
1930 	       __int128 argument.  */
1931 	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1932 	      instance_code = RS6000_BIF_VSIEQPF;
1933 	    else
1934 	      instance_code = RS6000_BIF_VSIEQP;
1935 	  }
1936 	else
1937 	  {
1938 	    /* If first argument is of float variety, choose variant
1939 	       that expects double argument.  Otherwise, expect
1940 	       long long int argument.  */
1941 	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1942 	      instance_code = RS6000_BIF_VSIEDPF;
1943 	    else
1944 	      instance_code = RS6000_BIF_VSIEDP;
1945 	  }
1946 
1947 	tree call = find_instance (&unsupported_builtin, &instance,
1948 				   instance_code, fcode, types, args);
1949 	if (call != error_mark_node)
1950 	  return call;
1951 	break;
1952       }
1953     default:
1954       /* Standard overload processing.  Look for an instance with compatible
1955 	 parameter types.  If it is supported in the current context, resolve
1956 	 the overloaded call to that instance.  */
1957       for (; instance != -1; instance = rs6000_instance_info[instance].next)
1958 	{
1959 	  tree fntype = rs6000_instance_info_fntype[instance];
1960 	  rs6000_gen_builtins bifid = rs6000_instance_info[instance].bifid;
1961 	  /* It is possible for an instance to require a data type that isn't
1962 	     defined on this target, in which case fntype will be
1963 	     NULL.  */
1964 	  if (!fntype)
1965 	    continue;
1966 
1967 	  bool mismatch = false;
1968 	  tree nextparm = TYPE_ARG_TYPES (fntype);
1969 
1970 	  for (unsigned int arg_i = 0;
1971 	       arg_i < nargs && nextparm != NULL;
1972 	       arg_i++)
1973 	    {
1974 	      tree parmtype = TREE_VALUE (nextparm);
1975 	      if (!rs6000_builtin_type_compatible (types[arg_i], parmtype))
1976 		{
1977 		  mismatch = true;
1978 		  break;
1979 		}
1980 	      nextparm = TREE_CHAIN (nextparm);
1981 	    }
1982 
1983 	  if (mismatch)
1984 	    continue;
1985 
1986 	  supported = rs6000_builtin_is_supported (bifid);
1987 	  if (rs6000_builtin_decl (bifid, false) != error_mark_node
1988 	      && supported)
1989 	    {
1990 	      tree ret_type = TREE_TYPE (fntype);
1991 	      fntype = rs6000_builtin_info_fntype[bifid];
1992 	      return altivec_build_resolved_builtin (args, nargs, fntype,
1993 						     ret_type, bifid, fcode);
1994 	    }
1995 	  else
1996 	    {
1997 	      unsupported_builtin = true;
1998 	      break;
1999 	    }
2000 	}
2001     }
2002 
2003   if (unsupported_builtin)
2004     {
2005       const char *name = rs6000_overload_info[adj_fcode].ovld_name;
2006       if (!supported)
2007 	{
2008 	  /* Indicate that the instantiation of the overloaded builtin
2009 	     name is not available with the target flags in effect.  */
2010 	  rs6000_gen_builtins bifid = rs6000_instance_info[instance].bifid;
2011 	  rs6000_gen_builtins fcode = (rs6000_gen_builtins) bifid;
2012 	  rs6000_invalid_builtin (fcode);
2013 	  /* Provide clarity of the relationship between the overload
2014 	     and the instantiation.  */
2015 	  const char *internal_name = rs6000_builtin_info[bifid].bifname;
2016 	  rich_location richloc (line_table, input_location);
2017 	  inform (&richloc,
2018 		  "overloaded builtin %qs is implemented by builtin %qs",
2019 		  name, internal_name);
2020 	}
2021       else
2022 	error ("%qs is not supported in this compiler configuration", name);
2023 
2024       return error_mark_node;
2025     }
2026 
2027   /* If we fall through to here, there were no compatible instances.  */
2028   const char *name = rs6000_overload_info[adj_fcode].ovld_name;
2029   error ("invalid parameter combination for AltiVec intrinsic %qs", name);
2030   return error_mark_node;
2031 }
2032