;; Predicate definitions for IA-32 and x86-64.
;; Copyright (C) 2004-2013 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Return true if OP is either an i387 or an SSE fp register.
(define_predicate "any_fp_register_operand"
  (and (match_code "reg")
       (match_test "ANY_FP_REGNO_P (REGNO (op))")))

;; Return true if OP is an i387 fp register.
(define_predicate "fp_register_operand"
  (and (match_code "reg")
       (match_test "STACK_REGNO_P (REGNO (op))")))

;; Return true if OP is a non-fp register_operand.
(define_predicate "register_and_not_any_fp_reg_operand"
  (and (match_code "reg")
       (not (match_test "ANY_FP_REGNO_P (REGNO (op))"))))

;; Return true if OP is a register operand other than an i387 fp register.
(define_predicate "register_and_not_fp_reg_operand"
  (and (match_code "reg")
       (not (match_test "STACK_REGNO_P (REGNO (op))"))))

;; True if the operand is an MMX register.
(define_predicate "mmx_reg_operand"
  (and (match_code "reg")
       (match_test "MMX_REGNO_P (REGNO (op))")))

;; True if the operand is an SSE register.
(define_predicate "sse_reg_operand"
  (and (match_code "reg")
       (match_test "SSE_REGNO_P (REGNO (op))")))

;; True if the operand is a Q_REGS class register.
(define_predicate "q_regs_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return ANY_QI_REG_P (op);
})

;; Match an SI or HImode register for a zero_extract.
(define_special_predicate "ext_register_operand"
  (match_operand 0 "register_operand")
{
  if ((!TARGET_64BIT || GET_MODE (op) != DImode)
      && GET_MODE (op) != SImode && GET_MODE (op) != HImode)
    return false;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* Be careful to accept only registers having upper parts.  */
  return (REG_P (op)
	  && (REGNO (op) > LAST_VIRTUAL_REGISTER || REGNO (op) <= BX_REG));
})

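;; As an illustrative note: the hard registers numbered at or below
;; BX_REG are %eax, %edx, %ecx and %ebx, the only ones whose second
;; byte is separately addressable as %ah/%dh/%ch/%bh.  Pseudo registers
;; (REGNO above LAST_VIRTUAL_REGISTER) are accepted as well, since the
;; register allocator can still assign them to one of those four.
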
;; Return true if op is the AX register.
(define_predicate "ax_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == AX_REG")))

;; Return true if op is the flags register.
(define_predicate "flags_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == FLAGS_REG")))

;; Return true if op is one of the QImode registers %[abcd][hl].
(define_predicate "QIreg_operand"
  (match_test "QI_REG_P (op)"))

;; Return true if op is a QImode register operand other than
;; %[abcd][hl].
(define_predicate "ext_QIreg_operand"
  (and (match_code "reg")
       (match_test "TARGET_64BIT")
       (match_test "REGNO (op) > BX_REG")))

;; Return true if VALUE can be stored in a sign extended immediate field.
(define_predicate "x86_64_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  if (!TARGET_64BIT)
    return immediate_operand (op, mode);

  switch (GET_CODE (op))
    {
    case CONST_INT:
      /* CONST_DOUBLEs never match, since HOST_BITS_PER_WIDE_INT is known
	 to be at least 32, so all acceptable constants are
	 represented as CONST_INTs.  */
      if (HOST_BITS_PER_WIDE_INT == 32)
	return true;
      else
	{
	  HOST_WIDE_INT val = trunc_int_for_mode (INTVAL (op), DImode);
	  return trunc_int_for_mode (val, SImode) == val;
	}
      break;

    case SYMBOL_REF:
      /* For certain code models, the symbolic references are known to fit;
	 in the CM_SMALL_PIC model we know they fit if they are local to
	 the shared library.  Don't count TLS SYMBOL_REFs here, since they
	 should fit only inside an UNSPEC, handled below.  */
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
	      || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
	      || ix86_cmodel == CM_KERNEL);

    case CONST:
      /* We may also accept offset memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == UNSPEC)
	switch (XINT (XEXP (op, 0), 1))
	  {
	  case UNSPEC_GOTPCREL:
	  case UNSPEC_DTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_NTPOFF:
	    return true;
	  default:
	    break;
	  }

      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);
	  HOST_WIDE_INT offset;

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;
	  offset = trunc_int_for_mode (INTVAL (op2), DImode);
	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;
	      /* For CM_SMALL, assume that the latest object is 16MB below
		 the end of the 31-bit boundary.  We may also accept pretty
		 large negative constants, knowing that all objects are in
		 the positive half of the address space.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset < 16*1024*1024
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      /* For CM_KERNEL we know that all objects reside in the
		 negative half of the 32-bit address space.  We may not
		 accept negative offsets, since they may land just outside
		 an object, but we may accept pretty large positive
		 ones.  */
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to the SYMBOL_REF ones; just
		 the constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset < 16*1024*1024
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0
		  && trunc_int_for_mode (offset, SImode) == offset)
		return true;
	      break;

	    case UNSPEC:
	      switch (XINT (op1, 1))
		{
		case UNSPEC_DTPOFF:
		case UNSPEC_NTPOFF:
		  if (trunc_int_for_mode (offset, SImode) == offset)
		    return true;
		}
	      break;

	    default:
	      break;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return false;
})

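;; A worked example for the CONST_INT case above: 0x7fffffff is
;; accepted because it survives sign extension from 32 bits unchanged,
;; while 0x80000000 sign-extends to 0xffffffff80000000 and is rejected;
;; it cannot be encoded as a sign-extended imm32 in 64-bit code.
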
;; Return true if VALUE can be stored in the zero extended immediate field.
(define_predicate "x86_64_zext_immediate_operand"
  (match_code "const_double,const_int,symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case CONST_DOUBLE:
      if (HOST_BITS_PER_WIDE_INT == 32)
	return (GET_MODE (op) == VOIDmode && !CONST_DOUBLE_HIGH (op));
      else
	return false;

    case CONST_INT:
      if (HOST_BITS_PER_WIDE_INT == 32)
	return INTVAL (op) >= 0;
      else
	return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);

    case SYMBOL_REF:
      /* For certain code models, the symbolic references are known to fit.  */
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;
      return (ix86_cmodel == CM_SMALL
	      || (ix86_cmodel == CM_MEDIUM
		  && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;

    case CONST:
      /* We may also accept offset memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;
	      /* For the small code model we may accept pretty large positive
		 offsets, since one bit is available for free.  Negative
		 offsets are limited by the size of the NULL pointer area
		 specified by the ABI.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && CONST_INT_P (op2)
		  && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
		  && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
		return true;
	      /* ??? For the kernel, we may accept adjustment of
		 -0x10000000, since we know that it will just convert
		 negative address space to positive, but perhaps this
		 is not worthwhile.  */
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to the SYMBOL_REF ones; just
		 the constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && CONST_INT_P (op2)
		  && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
		  && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
		return true;
	      break;

	    default:
	      return false;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
  return false;
})

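;; Conversely, a worked example for the CONST_INT case above:
;; (const_int 0xffffffff) is accepted here even though
;; x86_64_immediate_operand rejects it, because an instruction such as
;; "movl $0xffffffff, %eax" zero-extends the value into the full
;; 64-bit register.  Constants with bits set above bit 31 never match.
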
;; Return true if OP is a general operand representable on x86-64.
(define_predicate "x86_64_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a general operand representable on x86-64
;; as a zero extended constant.  This predicate is used in zero-extending
;; conversion operations that require non-VOIDmode immediate operands.
(define_predicate "x86_64_zext_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (and (match_operand 0 "x86_64_zext_immediate_operand")
	      (match_test "GET_MODE (op) != VOIDmode")))
    (match_operand 0 "general_operand")))

;; Return true if OP is a general operand representable on x86-64
;; as either a sign extended or a zero extended constant.
(define_predicate "x86_64_szext_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is a nonmemory operand representable on x86-64.
(define_predicate "x86_64_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true if OP is a nonmemory operand representable on x86-64
;; as either a sign extended or a zero extended constant.
(define_predicate "x86_64_szext_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true when the operand is a PIC expression that can be computed
;; by the lea operation.
(define_predicate "pic_32bit_operand"
  (match_code "const,symbol_ref,label_ref")
{
  if (!flag_pic)
    return false;

  /* Rule out relocations that translate into 64-bit constants.  */
  if (TARGET_64BIT && GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
	op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC
	  && (XINT (op, 1) == UNSPEC_GOTOFF
	      || XINT (op, 1) == UNSPEC_GOT))
	return false;
    }

  return symbolic_operand (op, mode);
})

;; Return true if OP is a nonmemory operand acceptable for the movabs
;; patterns.
(define_predicate "x86_64_movabs_operand"
  (and (match_operand 0 "nonmemory_operand")
       (not (match_operand 0 "pic_32bit_operand"))))

;; Return true if OP is either a symbol reference or a sum of a symbol
;; reference and a constant.
(define_predicate "symbolic_operand"
  (match_code "symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && (XINT (op, 1) == UNSPEC_GOT
		  || XINT (op, 1) == UNSPEC_GOTOFF
		  || XINT (op, 1) == UNSPEC_PCREL
		  || XINT (op, 1) == UNSPEC_GOTPCREL)))
	return true;
      if (GET_CODE (op) != PLUS
	  || !CONST_INT_P (XEXP (op, 1)))
	return false;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != UNSPEC_GOTOFF)
	return false;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      return false;

    default:
      gcc_unreachable ();
    }
})

;; Return true if OP is a symbolic operand that resolves locally.
(define_predicate "local_symbolic_operand"
  (match_code "const,label_ref,symbol_ref")
{
  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) == LABEL_REF)
    return true;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  if (SYMBOL_REF_TLS_MODEL (op))
    return false;

  if (SYMBOL_REF_LOCAL_P (op))
    return true;

  /* There is, however, a not insubstantial body of code in the rest of
     the compiler that assumes it can just stick the results of
     ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done.  */
  /* ??? This is a hack.  Should update the body of the compiler to
     always create a DECL and invoke targetm.encode_section_info.  */
  if (strncmp (XSTR (op, 0), internal_label_prefix,
	       internal_label_prefix_len) == 0)
    return true;

  return false;
})

;; Test for a legitimate @GOTOFF operand.
;;
;; VxWorks does not impose a fixed gap between segments; the run-time
;; gap can be different from the object-file gap.  We therefore can't
;; use @GOTOFF unless we are absolutely sure that the symbol is in the
;; same segment as the GOT.  Unfortunately, the flexibility of linker
;; scripts means that we can't be sure of that in general, so assume
;; that @GOTOFF is never valid on VxWorks.
(define_predicate "gotoff_operand"
  (and (not (match_test "TARGET_VXWORKS_RTP"))
       (match_operand 0 "local_symbolic_operand")))

;; Test for various thread-local symbols.
(define_special_predicate "tls_symbolic_operand"
  (and (match_code "symbol_ref")
       (match_test "SYMBOL_REF_TLS_MODEL (op)")))

(define_special_predicate "tls_modbase_operand"
  (and (match_code "symbol_ref")
       (match_test "op == ix86_tls_module_base ()")))

;; Test for a pc-relative call operand.
(define_predicate "constant_call_address_operand"
  (match_code "symbol_ref")
{
  if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
    return false;
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  return true;
})

;; P6 processors will jump to the address after the decrement when %esp
;; is used as a call operand, so they would execute the return address
;; as code.  See Pentium Pro erratum 70, Pentium II erratum A33 and
;; Pentium III erratum E17.

(define_predicate "call_register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (!TARGET_64BIT && op == stack_pointer_rtx)
    return false;

  return register_no_elim_operand (op, mode);
})

;; True for any non-virtual, non-eliminable register.  Used in places where
;; instantiation of such a register may cause the pattern to not be
;; recognized.
(define_predicate "register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return !(op == arg_pointer_rtx
	   || op == frame_pointer_rtx
	   || IN_RANGE (REGNO (op),
			FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
})

;; Similarly, but include the stack pointer.  This is used to prevent esp
;; from being used as an index register.
(define_predicate "index_register_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (reload_in_progress || reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
})

;; Return false if this is any eliminable register.  Otherwise general_operand.
(define_predicate "general_no_elim_operand"
  (if_then_else (match_code "reg,subreg")
    (match_operand 0 "register_no_elim_operand")
    (match_operand 0 "general_operand")))

;; Return false if this is any eliminable register.  Otherwise
;; register_operand or a constant.
(define_predicate "nonmemory_no_elim_operand"
  (ior (match_operand 0 "register_no_elim_operand")
       (match_operand 0 "immediate_operand")))

;; Test for a valid operand for an indirect branch.
(define_predicate "indirect_branch_operand"
  (ior (match_operand 0 "register_operand")
       (and (not (match_test "TARGET_X32"))
	    (match_operand 0 "memory_operand"))))

;; Test for a valid operand for a call instruction.
;; Allow constant call address operands in Pmode only.
(define_special_predicate "call_insn_operand"
  (ior (match_test "constant_call_address_operand
		     (op, mode == VOIDmode ? mode : Pmode)")
       (match_operand 0 "call_register_no_elim_operand")
       (and (not (match_test "TARGET_X32"))
	    (match_operand 0 "memory_operand"))))

;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_special_predicate "sibcall_insn_operand"
  (ior (match_test "constant_call_address_operand
		     (op, mode == VOIDmode ? mode : Pmode)")
       (match_operand 0 "register_no_elim_operand")))

;; Match exactly zero.
(define_predicate "const0_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST0_RTX (mode);
})

;; Match one, or a vector filled with ones.
(define_predicate "const1_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST1_RTX (mode);
})

;; Match exactly eight.
(define_predicate "const8_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 8")))

;; Match exactly 128.
(define_predicate "const128_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 128")))

;; Match exactly 0xffffffff in anddi as a zero-extension operation.
(define_predicate "const_32bit_mask"
  (and (match_code "const_int")
       (match_test "trunc_int_for_mode (INTVAL (op), DImode)
		    == (HOST_WIDE_INT) 0xffffffff")))

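;; As an illustration of the intended use: with such a mask,
;; (and:DI (reg:DI x) (const_int 0xffffffff)) can be implemented as a
;; plain 32-bit move, since on x86-64 writing the low 32 bits of a
;; register implicitly clears the upper 32 bits.
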
;; Match 2, 4, or 8.  Used for leal multiplicands.
(define_predicate "const248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 4 || i == 8;
})

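;; Background for the 2/4/8 restriction: the SIB addressing byte has a
;; two-bit scale field, so only index scales of 1, 2, 4 and 8 exist.
;; E.g. (plus:SI (mult:SI (reg:SI i) (const_int 4)) (reg:SI b))
;; assembles to an address of the form (%b,%i,4); a scale of 3 has no
;; encoding and must be handled differently (see const359_operand).
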
;; Match 1, 2, 4, or 8.
(define_predicate "const1248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 4 || i == 8;
})

;; Match 3, 5, or 9.  Used for leal multiplicands.
(define_predicate "const359_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 3 || i == 5 || i == 9;
})

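;; 3, 5 and 9 work because lea may use the same register as both base
;; and index: for example, x*9 can be computed as
;; "leal (%eax,%eax,8), %edx", i.e. base + index*8 with base == index.
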
;; Match 0 or 1.
(define_predicate "const_0_to_1_operand"
  (and (match_code "const_int")
       (ior (match_test "op == const0_rtx")
	    (match_test "op == const1_rtx"))))

;; Match 0 to 3.
(define_predicate "const_0_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))

;; Match 0 to 7.
(define_predicate "const_0_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))

;; Match 0 to 15.
(define_predicate "const_0_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))

;; Match 0 to 31.
(define_predicate "const_0_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))

;; Match 0 to 63.
(define_predicate "const_0_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 63)")))

;; Match 0 to 255.
(define_predicate "const_0_to_255_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 255)")))

;; Match (0 to 255) * 8.
(define_predicate "const_0_to_255_mul_8_operand"
  (match_code "const_int")
{
  unsigned HOST_WIDE_INT val = INTVAL (op);
  return val <= 255*8 && val % 8 == 0;
})

;; Return true if OP is a CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 31)")))

;; Return true if OP is a CONST_INT >= 1 and <= 63 (a valid operand
;; for 64-bit shift & compare patterns, as shifting by 0 does not change
;; flags).
(define_predicate "const_1_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 63)")))

;; Match 2 or 3.
(define_predicate "const_2_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 2, 3)")))

;; Match 4 or 5.
(define_predicate "const_4_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 5)")))

;; Match 4 to 7.
(define_predicate "const_4_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 7)")))

;; Match 6 or 7.
(define_predicate "const_6_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 6, 7)")))

;; Match 8 to 11.
(define_predicate "const_8_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 11)")))

;; Match 12 to 15.
(define_predicate "const_12_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 15)")))

;; True if this is a constant appropriate for an increment or decrement.
(define_predicate "incdec_operand"
  (match_code "const_int")
{
  /* On Pentium 4, the inc and dec operations cause an extra dependency
     on the flags register, since the carry flag is not updated.  */
  if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
    return false;
  return op == const1_rtx || op == constm1_rtx;
})

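;; To illustrate the dependency: "incl %eax" writes OF/SF/ZF/AF/PF but
;; leaves CF untouched, so a later consumer of the full flags register
;; depends on both this instruction and the previous producer of CF;
;; "addl $1, %eax" rewrites all flags and avoids that partial-flags
;; stall.
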
;; True for registers, or 1 or -1.  Used to optimize double-word shifts.
(define_predicate "reg_or_pm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (ior (match_test "op == const1_rtx")
		 (match_test "op == constm1_rtx")))))

;; True if OP is acceptable as an operand of the DImode shift expander.
(define_predicate "shiftdi_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

(define_predicate "ashldi_input_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "reg_or_pm1_operand")))

;; Return true if OP is a vector load from the constant pool with just
;; the first element nonzero.
(define_predicate "zero_extended_scalar_load_operand"
  (match_code "mem")
{
  unsigned n_elts;
  op = maybe_get_pool_constant (op);

  if (!(op && GET_CODE (op) == CONST_VECTOR))
    return false;

  n_elts = CONST_VECTOR_NUNITS (op);

  for (n_elts--; n_elts > 0; n_elts--)
    {
      rtx elt = CONST_VECTOR_ELT (op, n_elts);
      if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
	return false;
    }
  return true;
})

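;; For example, a constant-pool load of
;;   (const_vector:V4SI [(const_int 42) (const_int 0)
;;                       (const_int 0) (const_int 0)])
;; satisfies this predicate; only element 0 may be nonzero.
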
;; Return true if the operand is a vector constant that is all ones.
(define_predicate "vector_all_ones_operand"
  (match_code "const_vector")
{
  int nunits = GET_MODE_NUNITS (mode);

  if (GET_CODE (op) == CONST_VECTOR
      && CONST_VECTOR_NUNITS (op) == nunits)
    {
      int i;
      for (i = 0; i < nunits; ++i)
        {
          rtx x = CONST_VECTOR_ELT (op, i);
          if (x != constm1_rtx)
            return false;
        }
      return true;
    }

  return false;
})

;; Return true when OP is an operand acceptable for a standard SSE move.
(define_predicate "vector_move_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))

;; Return true when OP is either a nonimmediate operand, or any
;; CONST_VECTOR.
(define_predicate "nonimmediate_or_const_vector_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_code "const_vector")))

;; Return true when OP is a nonimmediate operand or a standard SSE constant.
(define_predicate "nonimmediate_or_sse_const_operand"
  (match_operand 0 "general_operand")
{
  if (nonimmediate_operand (op, mode))
    return true;
  if (standard_sse_constant_p (op) > 0)
    return true;
  return false;
})

;; Return true if OP is a register or zero.
(define_predicate "reg_or_0_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "const0_operand")))

;; Return true if op is a valid address for LEA and does not contain
;; a segment override.  Defined as a special predicate so that
;; mode-less const_int operands can be passed to address_operand.
(define_special_predicate "lea_address_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;

  if (!CONST_INT_P (op)
      && mode != VOIDmode
      && GET_MODE (op) != mode)
    return false;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  return parts.seg == SEG_DEFAULT;
})

;; Return true for RTX codes that force SImode address.
(define_predicate "SImode_address_operand"
  (match_code "subreg,zero_extend,and"))

;; Return true if op is a valid base register, displacement or
;; sum of base register and displacement for VSIB addressing.
(define_predicate "vsib_address_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;
  rtx disp;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  if (parts.index || parts.seg != SEG_DEFAULT)
    return false;

  /* VSIB addressing doesn't support (%rip).  */
  if (parts.disp)
    {
      disp = parts.disp;
      if (GET_CODE (disp) == CONST)
	{
	  disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == PLUS)
	    disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == UNSPEC)
	    switch (XINT (disp, 1))
	      {
	      case UNSPEC_GOTPCREL:
	      case UNSPEC_PCREL:
	      case UNSPEC_GOTNTPOFF:
		return false;
	      }
	}
      if (TARGET_64BIT
	  && flag_pic
	  && (GET_CODE (disp) == SYMBOL_REF
	      || GET_CODE (disp) == LABEL_REF))
	return false;
    }

  return true;
})

(define_predicate "vsib_mem_operator"
  (match_code "mem"))

;; Return true if the rtx is known to be at least 32 bits aligned.
(define_predicate "aligned_operand"
  (match_operand 0 "general_operand")
{
  struct ix86_address parts;
  int ok;

  /* Registers and immediate operands are always "aligned".  */
  if (!MEM_P (op))
    return true;

  /* All patterns using aligned_operand on memory operands end up
     promoting the memory operand to 64 bits and thus causing a memory
     mismatch stall.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
    return false;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return false;

  if (MEM_ALIGN (op) >= 32)
    return true;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return true;

  /* Decode the address.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
	return false;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return false;
    }
  if (parts.disp)
    {
      if (!CONST_INT_P (parts.disp)
	  || (INTVAL (parts.disp) & 3))
	return false;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return true;
})

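;; A sketch of the decomposition logic above: for the address
;; (plus:SI (mult:SI (reg:SI i) (const_int 4)) (const_int 8)), the
;; index contributes REGNO_POINTER_ALIGN (REGNO (i)) * 4 bits of known
;; alignment and the displacement 8 has its low two bits clear, so the
;; operand counts as 32-bit aligned whenever the index register is
;; known to be at least byte (8-bit) aligned.
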
;; Return true if OP is a memory operand with a displacement.
(define_predicate "memory_displacement_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);
  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand with a displacement only.
(define_predicate "memory_displacement_only_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT)
    return false;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base || parts.index)
    return false;

  return parts.disp != NULL_RTX;
})

;; Return true if OP is a memory operand which will need zero or one
;; register at most, not counting the stack pointer or frame pointer.
(define_predicate "cmpxchg8b_pic_memory_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT || !flag_pic)
    return true;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base && GET_CODE (parts.base) == SUBREG)
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && GET_CODE (parts.index) == SUBREG)
    parts.index = SUBREG_REG (parts.index);

  if (parts.base == NULL_RTX
      || parts.base == arg_pointer_rtx
      || parts.base == frame_pointer_rtx
      || parts.base == hard_frame_pointer_rtx
      || parts.base == stack_pointer_rtx)
    return true;

  if (parts.index == NULL_RTX
      || parts.index == arg_pointer_rtx
      || parts.index == frame_pointer_rtx
      || parts.index == hard_frame_pointer_rtx
      || parts.index == stack_pointer_rtx)
    return true;

  return false;
})


;; Return true if OP is a memory operand that cannot be represented
;; by the modRM array.
(define_predicate "long_memory_operand"
  (and (match_operand 0 "memory_operand")
       (match_test "memory_address_length (op, false)")))

;; Return true if OP is a comparison operator that can be issued by fcmov.
(define_predicate "fcmov_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* The i387 supports only a limited set of condition codes.  */
  switch (code)
    {
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode
	  || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
    case EQ: case NE:
      return true;
    default:
      return false;
    }
})

;; Return true if OP is a comparison that can be used in the CMPSS/CMPPS
;; insns.  The first set are supported directly; the second set can't be
;; done with full IEEE support, i.e. NaNs.

(define_predicate "sse_comparison_operator"
  (ior (match_code "eq,ne,lt,le,unordered,unge,ungt,ordered")
       (and (match_test "TARGET_AVX")
	    (match_code "ge,gt,uneq,unle,unlt,ltgt"))))

(define_predicate "ix86_comparison_int_operator"
  (match_code "ne,eq,ge,gt,le,lt"))

(define_predicate "ix86_comparison_uns_operator"
  (match_code "ne,eq,geu,gtu,leu,ltu"))

(define_predicate "bt_comparison_operator"
  (match_code "ne,eq"))

;; Return true if OP is a valid comparison operator in a valid mode.
(define_predicate "ix86_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    return ix86_trivial_fp_comparison_operator (op, mode);

  switch (code)
    {
    case EQ: case NE:
      return true;
    case LT: case GE:
      if (inmode == CCmode || inmode == CCGCmode
	  || inmode == CCGOCmode || inmode == CCNOmode)
	return true;
      return false;
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
      if (inmode == CCmode)
	return true;
      return false;
    case GT: case LE:
      if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
	return true;
      return false;
    default:
      return false;
    }
})

;; Return true if OP is a valid comparison operator
;; testing that the carry flag is set.
(define_predicate "ix86_carry_flag_operator"
  (match_code "ltu,lt,unlt,gtu,gt,ungt,le,unle,ge,unge,ltgt,uneq")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  else if (inmode == CCCmode)
    return code == LTU || code == GTU;
  else if (inmode != CCmode)
    return false;

  return code == LTU;
})

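;; For intuition: after "cmpl %ebx, %eax" the carry flag is set exactly
;; when %eax is below %ebx as an unsigned value, so an LTU comparison
;; in CCmode can be consumed directly by CF-reading instructions such
;; as sbb, adc and setc.
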
;; Return true if this comparison only requires testing one flag bit.
(define_predicate "ix86_trivial_fp_comparison_operator"
  (match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered"))

;; Return true if we know how to do this comparison.  Others require
;; testing more than one flag bit, and we let the generic middle-end
;; code do that.
(define_predicate "ix86_fp_comparison_operator"
  (if_then_else (match_test "ix86_fp_comparison_strategy (GET_CODE (op))
                             == IX86_FPCMP_ARITH")
               (match_operand 0 "comparison_operator")
               (match_operand 0 "ix86_trivial_fp_comparison_operator")))

;; Same as above, but for the swapped comparison used in
;; *jcc<fp>_<int>_i387.
(define_predicate "ix86_swapped_fp_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum rtx_code code = GET_CODE (op);
  bool ret;

  PUT_CODE (op, swap_condition (code));
  ret = ix86_fp_comparison_operator (op, mode);
  PUT_CODE (op, code);
  return ret;
})

;; Nearly general operand, but accept any const_double, since we wish
;; to be able to drop them into memory rather than have them get pulled
;; into registers.
(define_predicate "cmp_fp_expander_operand"
  (ior (match_code "const_double")
       (match_operand 0 "general_operand")))

;; Return true if this is a valid binary floating-point operation.
(define_predicate "binary_fp_operator"
  (match_code "plus,minus,mult,div"))

;; Return true if this is a multiply operation.
(define_predicate "mult_operator"
  (match_code "mult"))

;; Return true if this is a division operation.
(define_predicate "div_operator"
  (match_code "div"))

;; Return true if this is a plus, minus, and, ior or xor operation.
(define_predicate "plusminuslogic_operator"
  (match_code "plus,minus,and,ior,xor"))

;; Return true if this is a float extend operation.
(define_predicate "float_operator"
  (match_code "float"))

;; Return true for ARITHMETIC_P.
(define_predicate "arith_or_logical_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
	       mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))

;; Return true for COMMUTATIVE_P.
(define_predicate "commutative_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax"))

;; Return true if OP is a binary operator that can be promoted to a wider
;; mode.
(define_predicate "promotable_binary_operator"
  (ior (match_code "plus,minus,and,ior,xor,ashift")
       (and (match_code "mult")
	    (match_test "TARGET_TUNE_PROMOTE_HIMODE_IMUL"))))

(define_predicate "compare_operator"
  (match_code "compare"))

(define_predicate "absneg_operator"
  (match_code "abs,neg"))

;; Return true if OP is a misaligned memory operand.
(define_predicate "misaligned_operand"
  (and (match_code "mem")
       (match_test "MEM_ALIGN (op) < GET_MODE_ALIGNMENT (mode)")))

1193(define_predicate "emms_operation"
1194  (match_code "parallel")
1195{
1196  unsigned i;
1197
1198  if (XVECLEN (op, 0) != 17)
1199    return false;
1200
1201  for (i = 0; i < 8; i++)
1202    {
1203      rtx elt = XVECEXP (op, 0, i+1);
1204
1205      if (GET_CODE (elt) != CLOBBER
1206	  || GET_CODE (SET_DEST (elt)) != REG
1207	  || GET_MODE (SET_DEST (elt)) != XFmode
1208	  || REGNO (SET_DEST (elt)) != FIRST_STACK_REG + i)
1209        return false;
1210
1211      elt = XVECEXP (op, 0, i+9);
1212
1213      if (GET_CODE (elt) != CLOBBER
1214	  || GET_CODE (SET_DEST (elt)) != REG
1215	  || GET_MODE (SET_DEST (elt)) != DImode
1216	  || REGNO (SET_DEST (elt)) != FIRST_MMX_REG + i)
1217	return false;
1218    }
1219  return true;
1220})
1221
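;; The expected PARALLEL shape, for reference: element 0 is the emms
;; pattern itself, elements 1 through 8 clobber the i387 stack
;; registers st(0)..st(7) in XFmode, and elements 9 through 16 clobber
;; the MMX registers mm0..mm7 in DImode, giving the 17 elements checked
;; above.
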
;; Return true if OP is a vzeroall operation, known to be a PARALLEL.
(define_predicate "vzeroall_operation"
  (match_code "parallel")
{
  unsigned i, nregs = TARGET_64BIT ? 16 : 8;

  if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
    return false;

  for (i = 0; i < nregs; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != V8SImode
	  || REGNO (SET_DEST (elt)) != SSE_REGNO (i)
	  || SET_SRC (elt) != CONST0_RTX (V8SImode))
	return false;
    }
  return true;
})

;; Return true if OP is a vzeroupper operation.
(define_predicate "vzeroupper_operation"
  (and (match_code "unspec_volatile")
       (match_test "XINT (op, 1) == UNSPECV_VZEROUPPER")))

;; Return true if OP is a parallel for a vbroadcast permute.

(define_predicate "avx_vbroadcast_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  rtx elt = XVECEXP (op, 0, 0);
  int i, nelt = XVECLEN (op, 0);

  /* Don't bother checking there are the right number of operands,
     merely that they're all identical.  */
  for (i = 1; i < nelt; ++i)
    if (XVECEXP (op, 0, i) != elt)
      return false;
  return true;
})

;; Return true if OP is a proper third operand to vpblendw256.
(define_predicate "avx2_pblendw_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT val = INTVAL (op);
  HOST_WIDE_INT low = val & 0xff;
  return val == ((low << 8) | low);
})

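;; A worked example: 0x5a5a is accepted since its high and low bytes
;; are equal, while 0x1234 is rejected.  vpblendw takes only an 8-bit
;; immediate that is applied identically to each 128-bit lane, so a
;; 256-bit blend is representable only when both lanes use the same
;; per-word selector byte.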