;; Predicate definitions for IA-32 and x86-64.
;; Copyright (C) 2004-2017 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Return true if OP is either an i387 or an SSE fp register.
(define_predicate "any_fp_register_operand"
  (and (match_code "reg")
       (match_test "ANY_FP_REGNO_P (REGNO (op))")))

;; Return true if OP is an i387 fp register.
(define_predicate "fp_register_operand"
  (and (match_code "reg")
       (match_test "STACK_REGNO_P (REGNO (op))")))

;; True if the operand is a GENERAL class register.
(define_predicate "general_reg_operand"
  (and (match_code "reg")
       (match_test "GENERAL_REGNO_P (REGNO (op))")))

;; True if the operand is a nonimmediate operand with a GENERAL class register.
(define_predicate "nonimmediate_gr_operand"
  (if_then_else (match_code "reg")
    (match_test "GENERAL_REGNO_P (REGNO (op))")
    (match_operand 0 "nonimmediate_operand")))

;; True if the operand is a general operand with a GENERAL class register.
(define_predicate "general_gr_operand"
  (if_then_else (match_code "reg")
    (match_test "GENERAL_REGNO_P (REGNO (op))")
    (match_operand 0 "general_operand")))

;; True if the operand is an MMX register.
(define_predicate "mmx_reg_operand"
  (and (match_code "reg")
       (match_test "MMX_REGNO_P (REGNO (op))")))

;; True if the operand is an SSE register.
(define_predicate "sse_reg_operand"
  (and (match_code "reg")
       (match_test "SSE_REGNO_P (REGNO (op))")))

;; True if the operand is one of the new AVX-512 SSE registers.
(define_predicate "ext_sse_reg_operand"
  (and (match_code "reg")
       (match_test "EXT_REX_SSE_REGNO_P (REGNO (op))")))

;; Return true if op is a QImode register.
(define_predicate "any_QIreg_operand"
  (and (match_code "reg")
       (match_test "ANY_QI_REGNO_P (REGNO (op))")))

;; Return true if op is one of QImode registers: %[abcd][hl].
(define_predicate "QIreg_operand"
  (and (match_code "reg")
       (match_test "QI_REGNO_P (REGNO (op))")))

;; Return true if op is a QImode register operand other than %[abcd][hl].
(define_predicate "ext_QIreg_operand"
  (and (match_test "TARGET_64BIT")
       (match_code "reg")
       (not (match_test "QI_REGNO_P (REGNO (op))"))))

;; Return true if op is the AX register.
(define_predicate "ax_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == AX_REG")))

;; Return true if op is the flags register.
(define_predicate "flags_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == FLAGS_REG")))

;; Match a DI, SI or HImode register for a zero_extract.
(define_special_predicate "ext_register_operand"
  (and (match_operand 0 "register_operand")
       (ior (and (match_test "TARGET_64BIT")
		 (match_test "GET_MODE (op) == DImode"))
	    (match_test "GET_MODE (op) == SImode")
	    (match_test "GET_MODE (op) == HImode"))))

;; Match register operands, but include memory operands for TARGET_SSE_MATH.
(define_predicate "register_ssemem_operand"
  (if_then_else
    (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

;; Match nonimmediate operands, but exclude memory operands
;; for TARGET_SSE_MATH if TARGET_MIX_SSE_I387 is not enabled.
(define_predicate "nonimm_ssenomem_operand"
  (if_then_else
    (and (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
	 (not (match_test "TARGET_MIX_SSE_I387")))
    (match_operand 0 "register_operand")
    (match_operand 0 "nonimmediate_operand")))

;; The above predicate, suitable for x87 arithmetic operators.
(define_predicate "x87nonimm_ssenomem_operand"
  (if_then_else
    (and (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
	 (not (match_test "TARGET_MIX_SSE_I387 && X87_ENABLE_ARITH (mode)")))
    (match_operand 0 "register_operand")
    (match_operand 0 "nonimmediate_operand")))

;; Match register operands, but include memory operands for TARGET_SSE4_1.
(define_predicate "register_sse4nonimm_operand"
  (if_then_else (match_test "TARGET_SSE4_1")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

;; Return true if VALUE is a symbol reference.
(define_predicate "symbol_operand"
  (match_code "symbol_ref"))

;; Return true if VALUE can be stored in a sign-extended immediate field.
(define_predicate "x86_64_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  if (!TARGET_64BIT)
    return immediate_operand (op, mode);

  switch (GET_CODE (op))
    {
    case CONST_INT:
      {
        HOST_WIDE_INT val = INTVAL (op);
        return trunc_int_for_mode (val, SImode) == val;
      }
    case SYMBOL_REF:
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;

      /* Load the external function address via the GOT slot.  */
      if (ix86_force_load_from_GOT_p (op))
	return false;

      /* For certain code models, the symbolic references are known to fit.
	 In CM_SMALL_PIC model we know it fits if it is local to the shared
	 library.  Don't count TLS SYMBOL_REFs here, since they should fit
	 only if inside of UNSPEC handled below.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
	      || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
	      || ix86_cmodel == CM_KERNEL);

    case CONST:
      /* We may also accept offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == UNSPEC)
	switch (XINT (XEXP (op, 0), 1))
	  {
	  case UNSPEC_GOTPCREL:
	  case UNSPEC_DTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_NTPOFF:
	    return true;
	  default:
	    break;
	  }

      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;

	  HOST_WIDE_INT offset = INTVAL (op2);
	  if (trunc_int_for_mode (offset, SImode) != offset)
	    return false;

	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;

	      /* Load the external function address via the GOT slot.  */
	      if (ix86_force_load_from_GOT_p (op1))
		return false;

	      /* For CM_SMALL assume that the latest object is 16MB before
		 the end of the 31-bit boundary.  We may also accept pretty
		 large negative constants, knowing that all objects are in
		 the positive half of the address space.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset < 16*1024*1024)
		return true;
	      /* For CM_KERNEL we know that all objects reside in the
		 negative half of the 32-bit address space.  We may not
		 accept negative offsets, since they may be just off,
		 but we may accept pretty large positive ones.  */
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0)
		return true;
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset < 16*1024*1024)
		return true;
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0)
		return true;
	      break;

	    case UNSPEC:
	      switch (XINT (op1, 1))
		{
		case UNSPEC_DTPOFF:
		case UNSPEC_NTPOFF:
		  return true;
		}
	      break;

	    default:
	      break;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return false;
})
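
;; For illustration only (not part of the machine description): the
;; CONST_INT case above asks whether the value survives a round trip
;; through a signed 32-bit integer.  A minimal standalone C sketch of
;; that check (fits_simm32 is a hypothetical name, not a GCC function):
;;
;;   #include <stdint.h>
;;   #include <stdbool.h>
;;
;;   /* True if VAL fits in a sign-extended 32-bit immediate field,
;;      mirroring trunc_int_for_mode (val, SImode) == val.  */
;;   static bool
;;   fits_simm32 (int64_t val)
;;   {
;;     return val == (int64_t) (int32_t) val;
;;   }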

;; Return true if VALUE can be stored in the zero-extended immediate field.
(define_predicate "x86_64_zext_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);

    case SYMBOL_REF:
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;

      /* Load the external function address via the GOT slot.  */
      if (ix86_force_load_from_GOT_p (op))
	return false;

      /* For certain code models, the symbolic references are known to fit.  */
      return (ix86_cmodel == CM_SMALL
	      || (ix86_cmodel == CM_MEDIUM
		  && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;

    case CONST:
      /* We may also accept offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;

	  HOST_WIDE_INT offset = INTVAL (op2);
	  if (trunc_int_for_mode (offset, SImode) != offset)
	    return false;

	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;

	      /* Load the external function address via the GOT slot.  */
	      if (ix86_force_load_from_GOT_p (op1))
		return false;

	      /* For the small code model we may accept pretty large positive
		 offsets, since one bit is available for free.  Negative
		 offsets are limited by the size of the NULL pointer area
		 specified by the ABI.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset > -0x10000)
		return true;
	      /* ??? For the kernel, we may accept adjustment of
		 -0x10000000, since we know that it will just convert
		 negative address space to positive, but perhaps this
		 is not worthwhile.  */
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset > -0x10000)
		return true;
	      break;

	    default:
	      return false;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
  return false;
})
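
;; Illustration only: the CONST_INT case above accepts exactly the
;; values whose upper 32 bits are clear.  A hedged C sketch of the same
;; test (fits_zimm32 is a hypothetical name):
;;
;;   #include <stdint.h>
;;   #include <stdbool.h>
;;
;;   /* True if VAL fits in a zero-extended 32-bit immediate field,
;;      mirroring !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff).  */
;;   static bool
;;   fits_zimm32 (int64_t val)
;;   {
;;     return (val & ~(int64_t) 0xffffffff) == 0;
;;   }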

;; Return true if VALUE is a constant integer whose low and high words satisfy
;; x86_64_immediate_operand.
(define_predicate "x86_64_hilo_int_operand"
  (match_code "const_int,const_wide_int")
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      return x86_64_immediate_operand (op, mode);

    case CONST_WIDE_INT:
      gcc_assert (CONST_WIDE_INT_NUNITS (op) == 2);
      return (x86_64_immediate_operand (GEN_INT (CONST_WIDE_INT_ELT (op, 0)),
					DImode)
	      && x86_64_immediate_operand (GEN_INT (CONST_WIDE_INT_ELT (op,
									1)),
					   DImode));

    default:
      gcc_unreachable ();
    }
})
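
;; Illustration only: for a two-word constant this checks each 64-bit
;; half independently.  In terms of the hypothetical fits_simm32 sketch
;; above, the halves {0x7fffffff, -1} pass, while {0x80000000, 0} fail
;; because 0x80000000 does not sign-extend from 32 bits:
;;
;;   #include <stdint.h>
;;   #include <stdbool.h>
;;
;;   /* True if both 64-bit halves of a 128-bit constant are valid
;;      sign-extended 32-bit immediates (hypothetical helper).  */
;;   static bool
;;   hilo_fits_simm32 (int64_t lo, int64_t hi)
;;   {
;;     return fits_simm32 (lo) && fits_simm32 (hi);
;;   }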

;; Return true if the size of VALUE can be stored in a sign-extended
;; immediate field.
(define_predicate "x86_64_immediate_size_operand"
  (and (match_code "symbol_ref")
       (ior (not (match_test "TARGET_64BIT"))
	    (match_test "ix86_cmodel == CM_SMALL")
	    (match_test "ix86_cmodel == CM_KERNEL"))))

;; Return true if OP is a general operand representable on x86_64.
(define_predicate "x86_64_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if both words of OP are general operands representable
;; on x86_64.
(define_predicate "x86_64_hilo_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_hilo_int_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a non-VOIDmode general operand representable
;; on x86_64.  This predicate is used in sign-extending conversion
;; operations that require non-VOIDmode immediate operands.
(define_predicate "x86_64_sext_operand"
  (and (match_test "GET_MODE (op) != VOIDmode")
       (match_operand 0 "x86_64_general_operand")))

;; Return true if OP is a non-VOIDmode general operand.  This predicate
;; is used in sign-extending conversion operations that require
;; non-VOIDmode immediate operands.
(define_predicate "sext_operand"
  (and (match_test "GET_MODE (op) != VOIDmode")
       (match_operand 0 "general_operand")))

;; Return true if OP is representable on x86_64 as a zero-extended operand.
;; This predicate is used in zero-extending conversion operations that
;; require non-VOIDmode immediate operands.
(define_predicate "x86_64_zext_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (and (match_operand 0 "x86_64_zext_immediate_operand")
	      (match_test "GET_MODE (op) != VOIDmode")))
    (match_operand 0 "nonimmediate_operand")))

;; Return true if OP is a general operand representable on x86_64
;; as either a sign-extended or a zero-extended constant.
(define_predicate "x86_64_szext_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a nonmemory operand representable on x86_64.
(define_predicate "x86_64_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true if OP is a nonmemory operand representable on x86_64
;; as either a sign-extended or a zero-extended constant.
(define_predicate "x86_64_szext_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true when the operand is a PIC expression that can be computed
;; by the lea operation.
(define_predicate "pic_32bit_operand"
  (match_code "const,symbol_ref,label_ref")
{
  if (!flag_pic)
    return false;

  /* Rule out relocations that translate into 64-bit constants.  */
  if (TARGET_64BIT && GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
	op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC
	  && (XINT (op, 1) == UNSPEC_GOTOFF
	      || XINT (op, 1) == UNSPEC_GOT))
	return false;
    }

  return symbolic_operand (op, mode);
})

;; Return true if OP is a nonmemory operand acceptable by the movabs patterns.
(define_predicate "x86_64_movabs_operand"
  (and (match_operand 0 "nonmemory_operand")
       (not (match_operand 0 "pic_32bit_operand"))))

;; Return true if OP is either a symbol reference or a sum of a symbol
;; reference and a constant.
(define_predicate "symbolic_operand"
  (match_code "symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && (XINT (op, 1) == UNSPEC_GOT
		  || XINT (op, 1) == UNSPEC_GOTOFF
		  || XINT (op, 1) == UNSPEC_PCREL
		  || XINT (op, 1) == UNSPEC_GOTPCREL)))
	return true;
      if (GET_CODE (op) != PLUS
	  || !CONST_INT_P (XEXP (op, 1)))
	return false;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != UNSPEC_GOTOFF)
	return false;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      return false;

    default:
      gcc_unreachable ();
    }
})

;; Return true if OP is a symbolic operand that resolves locally.
(define_predicate "local_symbolic_operand"
  (match_code "const,label_ref,symbol_ref")
{
  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) == LABEL_REF)
    return true;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  if (SYMBOL_REF_TLS_MODEL (op))
    return false;

  /* Dll-imported symbols are always external.  */
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  if (SYMBOL_REF_LOCAL_P (op))
    return true;

  /* There is, however, a not insubstantial body of code in the rest of
     the compiler that assumes it can just stick the results of
     ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done.  */
  /* ??? This is a hack.  Should update the body of the compiler to
     always create a DECL and invoke targetm.encode_section_info.  */
  if (strncmp (XSTR (op, 0), internal_label_prefix,
	       internal_label_prefix_len) == 0)
    return true;

  return false;
})

;; Test for a legitimate @GOTOFF operand.
;;
;; VxWorks does not impose a fixed gap between segments; the run-time
;; gap can be different from the object-file gap.  We therefore can't
;; use @GOTOFF unless we are absolutely sure that the symbol is in the
;; same segment as the GOT.  Unfortunately, the flexibility of linker
;; scripts means that we can't be sure of that in general, so assume
;; that @GOTOFF is never valid on VxWorks.
(define_predicate "gotoff_operand"
  (and (not (match_test "TARGET_VXWORKS_RTP"))
       (match_operand 0 "local_symbolic_operand")))

;; Test for various thread-local symbols.
(define_special_predicate "tls_symbolic_operand"
  (and (match_code "symbol_ref")
       (match_test "SYMBOL_REF_TLS_MODEL (op)")))

(define_special_predicate "tls_modbase_operand"
  (and (match_code "symbol_ref")
       (match_test "op == ix86_tls_module_base ()")))

;; Test for a pc-relative call operand.
(define_predicate "constant_call_address_operand"
  (match_code "symbol_ref")
{
  if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
    return false;
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  return true;
})

;; P6 processors will jump to the address after the decrement when %esp
;; is used as a call operand, so they will execute the return address
;; as code.  See Pentium Pro errata 70, Pentium 2 errata A33 and
;; Pentium 3 errata E17.

(define_predicate "call_register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);

  if (!TARGET_64BIT && op == stack_pointer_rtx)
    return false;

  return register_no_elim_operand (op, mode);
})

;; True for any register that is neither virtual nor eliminable.  Used in
;; places where instantiation of such a register may cause the pattern to
;; not be recognized.
(define_predicate "register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);
  return !(op == arg_pointer_rtx
	   || op == frame_pointer_rtx
	   || IN_RANGE (REGNO (op),
			FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
})

;; Similarly, but include the stack pointer.  This is used to prevent esp
;; from being used as an index reg.
(define_predicate "index_register_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);
  if (reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
})

;; Return false if this is any eliminable register.  Otherwise general_operand.
(define_predicate "general_no_elim_operand"
  (if_then_else (match_code "reg,subreg")
    (match_operand 0 "register_no_elim_operand")
    (match_operand 0 "general_operand")))

;; Return false if this is any eliminable register.  Otherwise
;; register_operand or a constant.
(define_predicate "nonmemory_no_elim_operand"
  (ior (match_operand 0 "register_no_elim_operand")
       (match_operand 0 "immediate_operand")))

;; Test for a valid operand for an indirect branch.
(define_predicate "indirect_branch_operand"
  (ior (match_operand 0 "register_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (not (match_test "TARGET_X32"))
	    (match_operand 0 "memory_operand"))))

;; Return true if OP is a memory operand that can be used in sibcalls.
;; Since a sibcall never returns, we can only use a call-clobbered
;; register as the GOT base.  Allow a GOT slot here only with a pseudo
;; register as the GOT base.  Sibcalls over a GOT slot are properly
;; handled by the *sibcall_GOT_32 and *sibcall_value_GOT_32 patterns.
(define_predicate "sibcall_memory_operand"
  (match_operand 0 "memory_operand")
{
  op = XEXP (op, 0);
  if (CONSTANT_P (op))
    return true;
  if (GET_CODE (op) == PLUS && REG_P (XEXP (op, 0)))
    {
      int regno = REGNO (XEXP (op, 0));
      if (!HARD_REGISTER_NUM_P (regno) || call_used_regs[regno])
	{
	  op = XEXP (op, 1);
	  if (GOT32_symbol_operand (op, VOIDmode))
	    return true;
	}
    }
  return false;
})

;; Return true if OP is a GOT memory operand.
(define_predicate "GOT_memory_operand"
  (match_operand 0 "memory_operand")
{
  op = XEXP (op, 0);
  return (GET_CODE (op) == CONST
	  && GET_CODE (XEXP (op, 0)) == UNSPEC
	  && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL);
})

;; Test for a valid operand for a call instruction.
;; Allow constant call address operands in Pmode only.
(define_special_predicate "call_insn_operand"
  (ior (match_test "constant_call_address_operand
		     (op, mode == VOIDmode ? mode : Pmode)")
       (match_operand 0 "call_register_no_elim_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (ior (and (not (match_test "TARGET_X32"))
		      (match_operand 0 "memory_operand"))
		 (and (match_test "TARGET_X32 && Pmode == DImode")
		      (match_operand 0 "GOT_memory_operand"))))))

;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_special_predicate "sibcall_insn_operand"
  (ior (match_test "constant_call_address_operand
		     (op, mode == VOIDmode ? mode : Pmode)")
       (match_operand 0 "register_no_elim_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (ior (and (not (match_test "TARGET_X32"))
		      (match_operand 0 "sibcall_memory_operand"))
		 (and (match_test "TARGET_X32 && Pmode == DImode")
		      (match_operand 0 "GOT_memory_operand"))))))

;; Return true if OP is a 32-bit GOT symbol operand.
(define_predicate "GOT32_symbol_operand"
  (match_test "GET_CODE (op) == CONST
               && GET_CODE (XEXP (op, 0)) == UNSPEC
               && XINT (XEXP (op, 0), 1) == UNSPEC_GOT"))

;; Match exactly zero.
(define_predicate "const0_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST0_RTX (mode);
})

;; Match one or a vector with all elements equal to one.
(define_predicate "const1_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST1_RTX (mode);
})

;; Match exactly -1.
(define_predicate "constm1_operand"
  (and (match_code "const_int")
       (match_test "op == constm1_rtx")))

;; Match exactly eight.
(define_predicate "const8_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 8")))

;; Match exactly 128.
(define_predicate "const128_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 128")))

;; Match exactly 0xFFFFFFFF in anddi as a zero-extension operation.
(define_predicate "const_32bit_mask"
  (and (match_code "const_int")
       (match_test "trunc_int_for_mode (INTVAL (op), DImode)
		    == (HOST_WIDE_INT) 0xffffffff")))

;; Match 2, 4, or 8.  Used for leal multiplicands.
(define_predicate "const248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 4 || i == 8;
})

;; Match 1, 2, or 3.  Used for lea shift amounts.
(define_predicate "const123_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 3;
})

;; Match 2, 3, 6, or 7.
(define_predicate "const2367_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 3 || i == 6 || i == 7;
})

;; Match 1, 2, 4, or 8.
(define_predicate "const1248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 4 || i == 8;
})

;; Match 3, 5, or 9.  Used for leal multiplicands.
(define_predicate "const359_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 3 || i == 5 || i == 9;
})
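
;; Illustration only: lea computes base + index*scale + disp, so the
;; scales accepted by const248_operand multiply directly, and with
;; base == index the same scales give the multiplicands 3, 5 and 9
;; accepted by const359_operand.  A hedged C sketch of the arithmetic
;; (lea_address is a hypothetical name):
;;
;;   /* Effective address computed by lea.  Passing base == index with
;;      scale 2, 4 or 8 yields reg*3, reg*5 or reg*9.  */
;;   static long
;;   lea_address (long base, long index, int scale, long disp)
;;   {
;;     return base + index * scale + disp;
;;   }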

;; Match 4, or 8 to 11.  Used for embedded rounding.
(define_predicate "const_4_or_8_to_11_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 4 || (i >= 8 && i <= 11);
})

;; Match 4 or 8.  Used for SAE.
(define_predicate "const48_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 4 || i == 8;
})

;; Match 0 or 1.
(define_predicate "const_0_to_1_operand"
  (and (match_code "const_int")
       (ior (match_test "op == const0_rtx")
	    (match_test "op == const1_rtx"))))

;; Match 0 to 3.
(define_predicate "const_0_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))

;; Match 0 to 4.
(define_predicate "const_0_to_4_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 4)")))

;; Match 0 to 5.
(define_predicate "const_0_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 5)")))

;; Match 0 to 7.
(define_predicate "const_0_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))

;; Match 0 to 15.
(define_predicate "const_0_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))

;; Match 0 to 31.
(define_predicate "const_0_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))

;; Match 0 to 63.
(define_predicate "const_0_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 63)")))

;; Match 0 to 255.
(define_predicate "const_0_to_255_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 255)")))

;; Match (0 to 255) * 8.
(define_predicate "const_0_to_255_mul_8_operand"
  (match_code "const_int")
{
  unsigned HOST_WIDE_INT val = INTVAL (op);
  return val <= 255*8 && val % 8 == 0;
})

;; Return true if OP is a CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 31)")))

;; Return true if OP is a CONST_INT >= 1 and <= 63 (a valid operand for
;; 64-bit shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 63)")))

;; Match 2 or 3.
(define_predicate "const_2_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 2, 3)")))

;; Match 4 to 5.
(define_predicate "const_4_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 5)")))

;; Match 4 to 7.
(define_predicate "const_4_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 7)")))

;; Match 6 to 7.
(define_predicate "const_6_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 6, 7)")))

;; Match 8 to 9.
(define_predicate "const_8_to_9_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 9)")))

;; Match 8 to 11.
(define_predicate "const_8_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 11)")))

;; Match 8 to 15.
(define_predicate "const_8_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 15)")))

;; Match 10 to 11.
(define_predicate "const_10_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 10, 11)")))

;; Match 12 to 13.
(define_predicate "const_12_to_13_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 13)")))

;; Match 12 to 15.
(define_predicate "const_12_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 15)")))

;; Match 14 to 15.
(define_predicate "const_14_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 14, 15)")))

;; Match 16 to 19.
(define_predicate "const_16_to_19_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 16, 19)")))

;; Match 16 to 31.
(define_predicate "const_16_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 16, 31)")))

;; Match 20 to 23.
(define_predicate "const_20_to_23_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 20, 23)")))

;; Match 24 to 27.
(define_predicate "const_24_to_27_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 24, 27)")))

;; Match 28 to 31.
(define_predicate "const_28_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 28, 31)")))

;; True if this is a constant appropriate for an increment or decrement.
(define_predicate "incdec_operand"
  (match_code "const_int")
{
  /* On Pentium4, the inc and dec operations cause an extra dependency on
     the flags register, since the carry flag is not set.  */
  if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
    return false;
  return op == const1_rtx || op == constm1_rtx;
})

;; True for registers, or 1 or -1.  Used to optimize double-word shifts.
(define_predicate "reg_or_pm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (ior (match_test "op == const1_rtx")
		 (match_test "op == constm1_rtx")))))

;; True if OP is acceptable as an operand of the DImode shift expander.
(define_predicate "shiftdi_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

(define_predicate "ashldi_input_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "reg_or_pm1_operand")))

;; Return true if OP is a vector load from the constant pool with just
;; the first element nonzero.
(define_predicate "zero_extended_scalar_load_operand"
  (match_code "mem")
{
  unsigned n_elts;
  op = avoid_constant_pool_reference (op);

  if (GET_CODE (op) != CONST_VECTOR)
    return false;

  n_elts = CONST_VECTOR_NUNITS (op);

  for (n_elts--; n_elts > 0; n_elts--)
    {
      rtx elt = CONST_VECTOR_ELT (op, n_elts);
      if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
	return false;
    }
  return true;
})

;; Return true if operand is a vector constant that is all ones.
(define_predicate "vector_all_ones_operand"
  (and (match_code "const_vector")
       (match_test "INTEGRAL_MODE_P (GET_MODE (op))")
       (match_test "op == CONSTM1_RTX (GET_MODE (op))")))

;; Return true when OP is an operand acceptable as a vector memory operand.
;; Only AVX can have a misaligned memory operand.
(define_predicate "vector_memory_operand"
  (and (match_operand 0 "memory_operand")
       (ior (match_test "TARGET_AVX")
	    (match_test "MEM_ALIGN (op) >= GET_MODE_ALIGNMENT (mode)"))))

;; Return true when OP is a register_operand or a vector_memory_operand.
(define_predicate "vector_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "vector_memory_operand")))

;; Return true when OP is an operand acceptable for a standard SSE move.
(define_predicate "vector_move_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))

;; Return true when OP is either a nonimmediate operand, or any
;; CONST_VECTOR.
(define_predicate "nonimmediate_or_const_vector_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_code "const_vector")))

;; Return true when OP is nonimmediate or a standard SSE constant.
(define_predicate "nonimmediate_or_sse_const_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_test "standard_sse_constant_p (op, mode)")))

;; Return true if OP is a register or a zero.
(define_predicate "reg_or_0_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "const0_operand")))

(define_predicate "norex_memory_operand"
  (and (match_operand 0 "memory_operand")
       (not (match_test "x86_extended_reg_mentioned_p (op)"))))

;; Return true for RTX codes that force SImode address.
(define_predicate "SImode_address_operand"
  (match_code "subreg,zero_extend,and"))

;; Return true if op is a valid address for LEA, and does not contain
;; a segment override.  Defined as a special predicate to allow
;; mode-less const_int operands to pass to address_operand.
(define_special_predicate "address_no_seg_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;

  if (!CONST_INT_P (op)
      && mode != VOIDmode
      && GET_MODE (op) != mode)
    return false;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  return parts.seg == ADDR_SPACE_GENERIC;
})

;; Return true if op is a valid base register, displacement or
;; sum of base register and displacement for VSIB addressing.
(define_predicate "vsib_address_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;
  rtx disp;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  if (parts.index || parts.seg != ADDR_SPACE_GENERIC)
    return false;

  /* VSIB addressing doesn't support (%rip).  */
  if (parts.disp)
    {
      disp = parts.disp;
      if (GET_CODE (disp) == CONST)
	{
	  disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == PLUS)
	    disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == UNSPEC)
	    switch (XINT (disp, 1))
	      {
	      case UNSPEC_GOTPCREL:
	      case UNSPEC_PCREL:
	      case UNSPEC_GOTNTPOFF:
		return false;
	      }
	}
      if (TARGET_64BIT
	  && flag_pic
	  && (GET_CODE (disp) == SYMBOL_REF
	      || GET_CODE (disp) == LABEL_REF))
	return false;
    }

  return true;
})

;; Return true if op is a valid MPX address operand without a base.
(define_predicate "address_mpx_no_base_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.index && parts.base)
    return false;

  if (parts.seg != ADDR_SPACE_GENERIC)
    return false;

  /* Do not support (%rip).  */
  if (parts.disp && flag_pic && TARGET_64BIT
      && SYMBOLIC_CONST (parts.disp))
    {
      if (GET_CODE (parts.disp) != CONST
	  || GET_CODE (XEXP (parts.disp, 0)) != PLUS
	  || GET_CODE (XEXP (XEXP (parts.disp, 0), 0)) != UNSPEC
	  || !CONST_INT_P (XEXP (XEXP (parts.disp, 0), 1))
	  || (XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_DTPOFF
	      && XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_NTPOFF))
	return false;
    }

  return true;
})

;; Return true if op is a valid MPX address operand without an index.
(define_predicate "address_mpx_no_index_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.index)
    return false;

  if (parts.seg != ADDR_SPACE_GENERIC)
    return false;

  /* Do not support (%rip).  */
  if (parts.disp && flag_pic && TARGET_64BIT
      && SYMBOLIC_CONST (parts.disp)
      && (GET_CODE (parts.disp) != CONST
	  || GET_CODE (XEXP (parts.disp, 0)) != PLUS
	  || GET_CODE (XEXP (XEXP (parts.disp, 0), 0)) != UNSPEC
	  || !CONST_INT_P (XEXP (XEXP (parts.disp, 0), 1))
	  || (XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_DTPOFF
	      && XINT (XEXP (XEXP (parts.disp, 0), 0), 1) != UNSPEC_NTPOFF)))
    return false;

  return true;
})

(define_predicate "vsib_mem_operator"
  (match_code "mem"))

(define_predicate "bnd_mem_operator"
  (match_code "mem"))

;; Return true if the rtx is known to be at least 32 bits aligned.
(define_predicate "aligned_operand"
  (match_operand 0 "general_operand")
{
  struct ix86_address parts;
  int ok;

  /* Registers and immediate operands are always "aligned".  */
  if (!MEM_P (op))
    return true;

  /* All patterns using aligned_operand on memory operands end up
     promoting the memory operand to 64 bits and thus causing a memory
     mismatch stall.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
    return false;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return false;

  if (MEM_ALIGN (op) >= 32)
    return true;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return true;

  /* Decode the address.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.base && SUBREG_P (parts.base))
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && SUBREG_P (parts.index))
    parts.index = SUBREG_REG (parts.index);

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
	return false;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return false;
    }
  if (parts.disp)
    {
      if (!CONST_INT_P (parts.disp)
	  || (INTVAL (parts.disp) & 3))
	return false;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return true;
})

;; Return true if OP is a memory operand with a displacement.
(define_predicate "memory_displacement_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);
  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand with a displacement only.
(define_predicate "memory_displacement_only_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT)
    return false;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base || parts.index)
    return false;

  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand whose address cannot be
;; represented by the ModRM byte alone.
(define_predicate "long_memory_operand"
  (and (match_operand 0 "memory_operand")
       (match_test "memory_address_length (op, false)")))

;; Return true if OP is a comparison operator that can be issued by fcmov.
(define_predicate "fcmov_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* The i387 supports just a limited set of condition codes.  */
  switch (code)
    {
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode
	  || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
    case EQ: case NE:
      return true;
    default:
      return false;
    }
})

;; Return true if OP is a comparison that can be used in the CMPSS/CMPPS insns.
;; The first set are supported directly; the second set can't be done with
;; full IEEE support, i.e. with NaNs.

(define_predicate "sse_comparison_operator"
  (ior (match_code "eq,ne,lt,le,unordered,unge,ungt,ordered")
       (and (match_test "TARGET_AVX")
	    (match_code "ge,gt,uneq,unle,unlt,ltgt"))))

(define_predicate "ix86_comparison_int_operator"
  (match_code "ne,eq,ge,gt,le,lt"))

(define_predicate "ix86_comparison_uns_operator"
  (match_code "ne,eq,geu,gtu,leu,ltu"))

(define_predicate "bt_comparison_operator"
  (match_code "ne,eq"))

;; Return true if OP is a valid comparison operator in a valid mode.
(define_predicate "ix86_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    return ix86_trivial_fp_comparison_operator (op, mode);

  switch (code)
    {
    case EQ: case NE:
      return true;
    case LT: case GE:
      if (inmode == CCmode || inmode == CCGCmode
	  || inmode == CCGOCmode || inmode == CCNOmode)
	return true;
      return false;
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
      if (inmode == CCmode)
	return true;
      return false;
    case GT: case LE:
      if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
	return true;
      return false;
    default:
      return false;
    }
})

;; Return true if OP is a valid comparison operator that tests whether
;; the carry flag is set.
(define_predicate "ix86_carry_flag_operator"
  (match_code "ltu,lt,unlt,gtu,gt,ungt,le,unle,ge,unge,ltgt,uneq")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  else if (inmode == CCCmode)
   return code == LTU || code == GTU;
  else if (inmode != CCmode)
    return false;

  return code == LTU;
})

;; Return true if this comparison only requires testing one flag bit.
(define_predicate "ix86_trivial_fp_comparison_operator"
  (match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered"))

;; Return true if we know how to do this comparison.  Others require
;; testing more than one flag bit, and we let the generic middle-end
;; code do that.
(define_predicate "ix86_fp_comparison_operator"
  (if_then_else (match_test "ix86_fp_comparison_strategy (GET_CODE (op))
                             == IX86_FPCMP_ARITH")
               (match_operand 0 "comparison_operator")
               (match_operand 0 "ix86_trivial_fp_comparison_operator")))

;; Same as above, but for the swapped comparison used in *jcc<fp>_<int>_i387.
(define_predicate "ix86_swapped_fp_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum rtx_code code = GET_CODE (op);
  bool ret;

  PUT_CODE (op, swap_condition (code));
  ret = ix86_fp_comparison_operator (op, mode);
  PUT_CODE (op, code);
  return ret;
})

;; Nearly a general operand, but accept any const_double, since we wish
;; to be able to drop them into memory rather than have them get pulled
;; into registers.
(define_predicate "cmp_fp_expander_operand"
  (ior (match_code "const_double")
       (match_operand 0 "general_operand")))

;; Return true if this is a valid binary floating-point operation.
(define_predicate "binary_fp_operator"
  (match_code "plus,minus,mult,div"))

;; Return true if this is a multiply operation.
(define_predicate "mult_operator"
  (match_code "mult"))

;; Return true if this is a division operation.
(define_predicate "div_operator"
  (match_code "div"))

;; Return true if this is a plus, minus, and, ior or xor operation.
(define_predicate "plusminuslogic_operator"
  (match_code "plus,minus,and,ior,xor"))

;; Return true if this is a float extend operation.
(define_predicate "float_operator"
  (match_code "float"))

;; Return true for ARITHMETIC_P.
(define_predicate "arith_or_logical_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
	       mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))

;; Return true for COMMUTATIVE_P.
(define_predicate "commutative_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax"))

;; Return true if OP is a binary operator that can be promoted to a wider mode.
(define_predicate "promotable_binary_operator"
  (ior (match_code "plus,minus,and,ior,xor,ashift")
       (and (match_code "mult")
	    (match_test "TARGET_TUNE_PROMOTE_HIMODE_IMUL"))))

(define_predicate "compare_operator"
  (match_code "compare"))

(define_predicate "absneg_operator"
  (match_code "abs,neg"))

;; Return true if OP is a memory operand aligned to
;; less than its natural alignment.
(define_predicate "misaligned_operand"
  (and (match_code "mem")
       (match_test "MEM_ALIGN (op) < GET_MODE_BITSIZE (mode)")))

;; Return true if OP is an emms operation, known to be a PARALLEL.
(define_predicate "emms_operation"
  (match_code "parallel")
{
  unsigned i;

  if (XVECLEN (op, 0) != 17)
    return false;

  for (i = 0; i < 8; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != XFmode
	  || REGNO (SET_DEST (elt)) != FIRST_STACK_REG + i)
        return false;

      elt = XVECEXP (op, 0, i+9);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != DImode
	  || REGNO (SET_DEST (elt)) != FIRST_MMX_REG + i)
	return false;
    }
  return true;
})

;; Return true if OP is a vzeroall operation, known to be a PARALLEL.
(define_predicate "vzeroall_operation"
  (match_code "parallel")
{
  unsigned i, nregs = TARGET_64BIT ? 16 : 8;

  if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
    return false;

  for (i = 0; i < nregs; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != V8SImode
	  || REGNO (SET_DEST (elt)) != GET_SSE_REGNO (i)
	  || SET_SRC (elt) != CONST0_RTX (V8SImode))
	return false;
    }
  return true;
})

;; Return true if OP is a vzeroupper operation.
(define_predicate "vzeroupper_operation"
  (and (match_code "unspec_volatile")
       (match_test "XINT (op, 1) == UNSPECV_VZEROUPPER")))

;; Return true if OP is an addsub vec_merge operation.
(define_predicate "addsub_vm_operator"
  (match_code "vec_merge")
{
  rtx op0, op1;
  int swapped;
  HOST_WIDE_INT mask;
  int nunits, elt;

  op0 = XEXP (op, 0);
  op1 = XEXP (op, 1);

  /* Sanity check.  */
  if (GET_CODE (op0) == MINUS && GET_CODE (op1) == PLUS)
    swapped = 0;
  else if (GET_CODE (op0) == PLUS && GET_CODE (op1) == MINUS)
    swapped = 1;
  else
    gcc_unreachable ();

  mask = INTVAL (XEXP (op, 2));
  nunits = GET_MODE_NUNITS (mode);

  for (elt = 0; elt < nunits; elt++)
    {
      /* Bit clear: take from op0; bit set: take from op1.  */
      int bit = !(mask & (HOST_WIDE_INT_1U << elt));

      if (bit != ((elt & 1) ^ swapped))
	return false;
    }

  return true;
})
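
;; Illustration only: a hedged C sketch of the lane test above, with
;; example masks derived from it (addsub_mask_ok is a hypothetical
;; name).  For a four-element mode it accepts mask 0x5 when (op0, op1)
;; is (MINUS, PLUS) and mask 0xa for the swapped pair:
;;
;;   #include <stdbool.h>
;;
;;   /* Mirror of the per-element check in addsub_vm_operator.  */
;;   static bool
;;   addsub_mask_ok (unsigned mask, int nunits, int swapped)
;;   {
;;     for (int elt = 0; elt < nunits; elt++)
;;       if (!((mask >> elt) & 1) != ((elt & 1) ^ swapped))
;;         return false;
;;     return true;
;;   }
;;
;;   /* addsub_mask_ok (0x5, 4, 0) and addsub_mask_ok (0xa, 4, 1) hold.  */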

;; Return true if OP is an addsub vec_select/vec_concat operation.
(define_predicate "addsub_vs_operator"
  (and (match_code "vec_select")
       (match_code "vec_concat" "0"))
{
  rtx op0, op1;
  bool swapped;
  int nunits, elt;

  op0 = XEXP (XEXP (op, 0), 0);
  op1 = XEXP (XEXP (op, 0), 1);

  /* Sanity check.  */
  if (GET_CODE (op0) == MINUS && GET_CODE (op1) == PLUS)
    swapped = false;
  else if (GET_CODE (op0) == PLUS && GET_CODE (op1) == MINUS)
    swapped = true;
  else
    gcc_unreachable ();

  nunits = GET_MODE_NUNITS (mode);
  if (XVECLEN (XEXP (op, 1), 0) != nunits)
    return false;

  /* We already checked that the permutation is suitable for addsub,
     so only look at the first element of the parallel.  */
  elt = INTVAL (XVECEXP (XEXP (op, 1), 0, 0));

  return elt == (swapped ? nunits : 0);
})

;; Return true if OP is a parallel for an addsub vec_select.
(define_predicate "addsub_vs_parallel"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  int nelt = XVECLEN (op, 0);
  int elt, i;

  if (nelt < 2)
    return false;

  /* Check that the permutation is suitable for addsub.
     For example, { 0 9 2 11 4 13 6 15 } or { 8 1 10 3 12 5 14 7 }.  */
  elt = INTVAL (XVECEXP (op, 0, 0));
  if (elt == 0)
    {
      for (i = 1; i < nelt; ++i)
	if (INTVAL (XVECEXP (op, 0, i)) != (i + (i & 1) * nelt))
	  return false;
    }
  else if (elt == nelt)
    {
      for (i = 1; i < nelt; ++i)
	if (INTVAL (XVECEXP (op, 0, i)) != (elt + i - (i & 1) * nelt))
	  return false;
    }
  else
    return false;

  return true;
})

;; Return true if OP is a parallel for a vbroadcast permute.
(define_predicate "avx_vbroadcast_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  rtx elt = XVECEXP (op, 0, 0);
  int i, nelt = XVECLEN (op, 0);

  /* Don't bother checking that there is the right number of operands,
     merely that they're all identical.  */
  for (i = 1; i < nelt; ++i)
    if (XVECEXP (op, 0, i) != elt)
      return false;
  return true;
})

;; Return true if OP is a parallel for a palignr permute.
(define_predicate "palignr_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  int elt = INTVAL (XVECEXP (op, 0, 0));
  int i, nelt = XVECLEN (op, 0);

  /* Check that the order in the permutation is suitable for palignr.
     For example, {5 6 7 0 1 2 3 4} is "palignr 5, xmm, xmm".  */
  for (i = 1; i < nelt; ++i)
    if (INTVAL (XVECEXP (op, 0, i)) != ((elt + i) % nelt))
      return false;
  return true;
})

;; Return true if OP is a proper third operand to vpblendw256.
(define_predicate "avx2_pblendw_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT val = INTVAL (op);
  HOST_WIDE_INT low = val & 0xff;
  return val == ((low << 8) | low);
})
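
;; Illustration only: the check above accepts exactly the 16-bit masks
;; whose low and high bytes are identical, e.g. 0xa5a5 passes while
;; 0x00ff does not, since the 8-bit immediate of vpblendw is replicated
;; across both 128-bit lanes.  A hedged C sketch (pblendw_mask_ok is a
;; hypothetical name):
;;
;;   #include <stdbool.h>
;;
;;   static bool
;;   pblendw_mask_ok (long val)
;;   {
;;     long low = val & 0xff;
;;     return val == ((low << 8) | low);
;;   }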

;; Return true if OP is vector_operand or CONST_VECTOR.
(define_predicate "general_vector_operand"
  (ior (match_operand 0 "vector_operand")
       (match_code "const_vector")))

;; Return true if OP is either the constant -1 or stored in a register.
(define_predicate "register_or_constm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (match_test "op == constm1_rtx"))))