;; Predicate definitions for IA-32 and x86-64.
;; Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009
;; Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Return nonzero if OP is either an i387 or SSE fp register.
(define_predicate "any_fp_register_operand"
  (and (match_code "reg")
       (match_test "ANY_FP_REGNO_P (REGNO (op))")))

;; Return nonzero if OP is an i387 fp register.
(define_predicate "fp_register_operand"
  (and (match_code "reg")
       (match_test "FP_REGNO_P (REGNO (op))")))

;; Return nonzero if OP is a non-fp register_operand.
(define_predicate "register_and_not_any_fp_reg_operand"
  (and (match_code "reg")
       (not (match_test "ANY_FP_REGNO_P (REGNO (op))"))))

;; Return nonzero if OP is a register operand other than an i387 fp register.
(define_predicate "register_and_not_fp_reg_operand"
  (and (match_code "reg")
       (not (match_test "FP_REGNO_P (REGNO (op))"))))

;; True if the operand is an MMX register.
(define_predicate "mmx_reg_operand"
  (and (match_code "reg")
       (match_test "MMX_REGNO_P (REGNO (op))")))

;; True if the operand is a Q_REGS class register.
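;; (Q_REGS is the class of %eax, %ebx, %ecx and %edx, the registers
;; whose low bytes are directly addressable.)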
(define_predicate "q_regs_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return ANY_QI_REG_P (op);
})

;; Match an SI or HImode register for a zero_extract.
(define_special_predicate "ext_register_operand"
  (match_operand 0 "register_operand")
{
  if ((!TARGET_64BIT || GET_MODE (op) != DImode)
      && GET_MODE (op) != SImode && GET_MODE (op) != HImode)
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* Be careful to accept only registers having upper parts.  */
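  /* That means the hard registers %eax, %edx, %ecx and %ebx (REGNO 0-3),
     or any pseudo that may still end up allocated to one of them.  */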
  return REGNO (op) > LAST_VIRTUAL_REGISTER || REGNO (op) < 4;
})

;; Return true if op is the AX register.
(define_predicate "ax_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == AX_REG")))

;; Return true if op is the flags register.
(define_predicate "flags_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == FLAGS_REG")))

;; Return true if op is a QImode register operand other than
;; %[abcd][hl].
(define_predicate "ext_QIreg_operand"
  (and (match_code "reg")
       (match_test "TARGET_64BIT
		    && GET_MODE (op) == QImode
		    && REGNO (op) > BX_REG")))

;; Similarly, but don't check the mode of the operand.
(define_predicate "ext_QIreg_nomode_operand"
  (and (match_code "reg")
       (match_test "TARGET_64BIT
		    && REGNO (op) > BX_REG")))

;; Return true if op is not the xmm0 register.
(define_predicate "reg_not_xmm0_operand"
   (and (match_operand 0 "register_operand")
	(match_test "!REG_P (op)
		     || REGNO (op) != FIRST_SSE_REG")))

;; As above, but allow nonimmediate operands.
(define_predicate "nonimm_not_xmm0_operand"
   (and (match_operand 0 "nonimmediate_operand")
	(match_test "!REG_P (op)
		     || REGNO (op) != FIRST_SSE_REG")))

;; Return 1 if VALUE can be stored in a sign-extended immediate field.
(define_predicate "x86_64_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  if (!TARGET_64BIT)
    return immediate_operand (op, mode);

  switch (GET_CODE (op))
    {
    case CONST_INT:
      /* CONST_DOUBLEs never match, since HOST_BITS_PER_WIDE_INT is known
         to be at least 32 and thus all acceptable constants are
	 represented as CONST_INT.  */
      if (HOST_BITS_PER_WIDE_INT == 32)
	return 1;
      else
	{
	  HOST_WIDE_INT val = trunc_int_for_mode (INTVAL (op), DImode);
	  return trunc_int_for_mode (val, SImode) == val;
	}
      break;

    case SYMBOL_REF:
      /* For certain code models, the symbolic references are known to fit.
	 In the CM_SMALL_PIC model we know it fits if it is local to the
	 shared library.  Don't count TLS SYMBOL_REFs here, since they
	 should fit only if inside of UNSPEC handled below.  */
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
	      || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
	      || ix86_cmodel == CM_KERNEL);

    case CONST:
      /* We also may accept the offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == UNSPEC)
	switch (XINT (XEXP (op, 0), 1))
	  {
	  case UNSPEC_GOTPCREL:
	  case UNSPEC_DTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_NTPOFF:
	    return 1;
	  default:
	    break;
	  }

      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);
	  HOST_WIDE_INT offset;

	  if (ix86_cmodel == CM_LARGE)
	    return 0;
	  if (!CONST_INT_P (op2))
	    return 0;
	  offset = trunc_int_for_mode (INTVAL (op2), DImode);
	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return 0;
	      /* For CM_SMALL assume that the latest object is 16MB below
		 the end of the 31-bit boundary.  We may also accept pretty
		 large negative constants, knowing that all objects are in
		 the positive half of the address space.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset < 16*1024*1024
		  && trunc_int_for_mode (offset, SImode) == offset)
		return 1;
	      /* For CM_KERNEL we know that all objects reside in the
		 negative half of the 32-bit address space.  We may not
		 accept negative offsets, since they may be just past the
		 boundary, but we may accept pretty large positive ones.  */
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0
		  && trunc_int_for_mode (offset, SImode) == offset)
		return 1;
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset < 16*1024*1024
		  && trunc_int_for_mode (offset, SImode) == offset)
		return 1;
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0
		  && trunc_int_for_mode (offset, SImode) == offset)
		return 1;
	      break;

	    case UNSPEC:
	      switch (XINT (op1, 1))
		{
		case UNSPEC_DTPOFF:
		case UNSPEC_NTPOFF:
		  if (offset > 0
		      && trunc_int_for_mode (offset, SImode) == offset)
		    return 1;
		}
	      break;

	    default:
	      break;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
})
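;; For example, on TARGET_64BIT (const_int -1) and (const_int 0x7fffffff)
;; match above, while (const_int 0xffffffff) does not, since it is not
;; the sign extension of its own low 32 bits.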

;; Return 1 if VALUE can be stored in a zero-extended immediate field.
(define_predicate "x86_64_zext_immediate_operand"
  (match_code "const_double,const_int,symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case CONST_DOUBLE:
      if (HOST_BITS_PER_WIDE_INT == 32)
	return (GET_MODE (op) == VOIDmode && !CONST_DOUBLE_HIGH (op));
      else
	return 0;

    case CONST_INT:
      if (HOST_BITS_PER_WIDE_INT == 32)
	return INTVAL (op) >= 0;
      else
	return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);

    case SYMBOL_REF:
      /* For certain code models, the symbolic references are known to fit.  */
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;
      return (ix86_cmodel == CM_SMALL
	      || (ix86_cmodel == CM_MEDIUM
		  && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;

    case CONST:
      /* We also may accept the offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE)
	    return 0;
	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return 0;
	      /* For the small code model we may accept pretty large positive
		 offsets, since one bit is available for free.  Negative
		 offsets are limited by the size of the NULL pointer area
		 specified by the ABI.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && CONST_INT_P (op2)
		  && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
		  && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
		return 1;
	      /* ??? For the kernel, we may accept adjustment of
		 -0x10000000, since we know that it will just convert
		 negative address space to positive, but perhaps this
		 is not worthwhile.  */
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && CONST_INT_P (op2)
		  && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
		  && trunc_int_for_mode (INTVAL (op2), SImode) == INTVAL (op2))
		return 1;
	      break;

	    default:
	      return 0;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
  return 0;
})
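;; For example, (const_int 0xffffffff) matches here (it zero-extends from
;; 32 bits) but fails x86_64_immediate_operand, while for (const_int -1)
;; it is the other way around.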

;; Return nonzero if OP is a general operand representable on x86_64.
(define_predicate "x86_64_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return nonzero if OP is a general operand representable on x86_64
;; as either a sign-extended or zero-extended constant.
(define_predicate "x86_64_szext_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return nonzero if OP is a nonmemory operand representable on x86_64.
(define_predicate "x86_64_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return nonzero if OP is a nonmemory operand representable on x86_64
;; as either a sign-extended or zero-extended constant.
(define_predicate "x86_64_szext_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true when the operand is a PIC expression that can be computed
;; by the lea operation.
(define_predicate "pic_32bit_operand"
  (match_code "const,symbol_ref,label_ref")
{
  if (!flag_pic)
    return 0;
  /* Rule out relocations that translate into 64-bit constants.  */
  if (TARGET_64BIT && GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
	op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC
	  && (XINT (op, 1) == UNSPEC_GOTOFF
	      || XINT (op, 1) == UNSPEC_GOT))
	return 0;
    }
  return symbolic_operand (op, mode);
})


;; Return nonzero if OP is a nonmemory operand acceptable to the movabs
;; patterns.
(define_predicate "x86_64_movabs_operand"
  (if_then_else (match_test "!TARGET_64BIT || !flag_pic")
    (match_operand 0 "nonmemory_operand")
    (ior (match_operand 0 "register_operand")
	 (and (match_operand 0 "const_double_operand")
	      (match_test "GET_MODE_SIZE (mode) <= 8")))))

;; Returns nonzero if OP is either a symbol reference or a sum of a symbol
;; reference and a constant.
(define_predicate "symbolic_operand"
  (match_code "symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && (XINT (op, 1) == UNSPEC_GOT
		  || XINT (op, 1) == UNSPEC_GOTOFF
		  || XINT (op, 1) == UNSPEC_GOTPCREL)))
	return 1;
      if (GET_CODE (op) != PLUS
	  || !CONST_INT_P (XEXP (op, 1)))
	return 0;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return 1;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != UNSPEC_GOTOFF)
	return 0;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return 1;
      return 0;

    default:
      gcc_unreachable ();
    }
})

;; Return true if the operand contains a @GOT or @GOTOFF reference.
(define_predicate "pic_symbolic_operand"
  (match_code "const")
{
  op = XEXP (op, 0);
  if (TARGET_64BIT)
    {
      if (GET_CODE (op) == UNSPEC
	  && XINT (op, 1) == UNSPEC_GOTPCREL)
	return 1;
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 0)) == UNSPEC
	  && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL)
	return 1;
    }
  else
    {
      if (GET_CODE (op) == UNSPEC)
	return 1;
      if (GET_CODE (op) != PLUS
	  || !CONST_INT_P (XEXP (op, 1)))
	return 0;
      op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC
	  && XINT (op, 1) != UNSPEC_MACHOPIC_OFFSET)
	return 1;
    }
  return 0;
})

;; Return true if OP is a symbolic operand that resolves locally.
(define_predicate "local_symbolic_operand"
  (match_code "const,label_ref,symbol_ref")
{
  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) == LABEL_REF)
    return 1;

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  if (SYMBOL_REF_TLS_MODEL (op) != 0)
    return 0;

  if (SYMBOL_REF_LOCAL_P (op))
    return 1;

  /* There is, however, a not insubstantial body of code in the rest of
     the compiler that assumes it can just stick the results of
     ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done.  */
  /* ??? This is a hack.  Should update the body of the compiler to
     always create a DECL and invoke targetm.encode_section_info.  */
  if (strncmp (XSTR (op, 0), internal_label_prefix,
	       internal_label_prefix_len) == 0)
    return 1;

  return 0;
})

;; Test for a legitimate @GOTOFF operand.
;;
;; VxWorks does not impose a fixed gap between segments; the run-time
;; gap can be different from the object-file gap.  We therefore can't
;; use @GOTOFF unless we are absolutely sure that the symbol is in the
;; same segment as the GOT.  Unfortunately, the flexibility of linker
;; scripts means that we can't be sure of that in general, so assume
;; that @GOTOFF is never valid on VxWorks.
(define_predicate "gotoff_operand"
  (and (match_test "!TARGET_VXWORKS_RTP")
       (match_operand 0 "local_symbolic_operand")))

;; Test for various thread-local symbols.
(define_predicate "tls_symbolic_operand"
  (and (match_code "symbol_ref")
       (match_test "SYMBOL_REF_TLS_MODEL (op) != 0")))

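;; Return true if OP is the SYMBOL_REF for the TLS module base, as
;; returned by ix86_tls_module_base ().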
(define_predicate "tls_modbase_operand"
  (and (match_code "symbol_ref")
       (match_test "op == ix86_tls_module_base ()")))

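;; Return true if OP is a register or the thread pointer expressed as
;; (unspec ... UNSPEC_TP).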
(define_predicate "tp_or_register_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "unspec")
	    (match_test "XINT (op, 1) == UNSPEC_TP"))))

;; Test for a pc-relative call operand.
(define_predicate "constant_call_address_operand"
  (match_code "symbol_ref")
{
  if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC)
    return false;
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  return true;
})

;; True for any non-virtual or eliminable register.  Used in places where
;; instantiation of such a register may cause the pattern to not be recognized.
(define_predicate "register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return !(op == arg_pointer_rtx
	   || op == frame_pointer_rtx
	   || IN_RANGE (REGNO (op),
			FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
})

;; P6 processors will jump to the address after the decrement when %esp
;; is used as a call operand, so they will execute the return address as
;; code.  See Pentium Pro errata 70, Pentium II errata A33 and Pentium III
;; errata E17.

(define_predicate "call_register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  if (!TARGET_64BIT && op == stack_pointer_rtx)
    return 0;

  return register_no_elim_operand (op, mode);
})

;; Similarly, but include the stack pointer.  This is used to prevent esp
;; from being used as an index reg.
(define_predicate "index_register_operand"
  (match_operand 0 "register_operand")
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (reload_in_progress || reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
})

;; Return false if this is any eliminable register.  Otherwise general_operand.
(define_predicate "general_no_elim_operand"
  (if_then_else (match_code "reg,subreg")
    (match_operand 0 "register_no_elim_operand")
    (match_operand 0 "general_operand")))

;; Return false if this is any eliminable register.  Otherwise
;; register_operand or a constant.
(define_predicate "nonmemory_no_elim_operand"
  (ior (match_operand 0 "register_no_elim_operand")
       (match_operand 0 "immediate_operand")))

;; Test for a valid operand for a call instruction.
(define_predicate "call_insn_operand"
  (ior (match_operand 0 "constant_call_address_operand")
       (match_operand 0 "call_register_no_elim_operand")
       (match_operand 0 "memory_operand")))

;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_predicate "sibcall_insn_operand"
  (ior (match_operand 0 "constant_call_address_operand")
       (match_operand 0 "register_no_elim_operand")))

;; Match exactly zero.
(define_predicate "const0_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST0_RTX (mode);
})

;; Match exactly one.
(define_predicate "const1_operand"
  (and (match_code "const_int")
       (match_test "op == const1_rtx")))

;; Match exactly eight.
(define_predicate "const8_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 8")))

;; Match exactly 128.
(define_predicate "const128_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 128")))

;; Match 2, 4, or 8.  Used for leal multiplicands.
(define_predicate "const248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 4 || i == 8;
})
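;; For example, this accepts the scale in an address such as
;; (plus (mult (reg) (const_int 4)) (reg)), as generated for array
;; indexing through lea.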

;; Match 0 or 1.
(define_predicate "const_0_to_1_operand"
  (and (match_code "const_int")
       (match_test "op == const0_rtx || op == const1_rtx")))

;; Match 0 to 3.
(define_predicate "const_0_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))

;; Match 0 to 7.
(define_predicate "const_0_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))

;; Match 0 to 15.
(define_predicate "const_0_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))

;; Match 0 to 31.
(define_predicate "const_0_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))

;; Match 0 to 63.
(define_predicate "const_0_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 63)")))

;; Match 0 to 255.
(define_predicate "const_0_to_255_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 255)")))

;; Match (0 to 255) * 8.
(define_predicate "const_0_to_255_mul_8_operand"
  (match_code "const_int")
{
  unsigned HOST_WIDE_INT val = INTVAL (op);
  return val <= 255*8 && val % 8 == 0;
})
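;; That is, a byte-aligned bit offset: 0, 8, 16, ... up to 2040.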

;; Return nonzero if OP is a CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 31)")))

;; Return nonzero if OP is a CONST_INT >= 1 and <= 63 (a valid operand
;; for 64-bit shift & compare patterns, as shifting by 0 does not change
;; flags).
(define_predicate "const_1_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 63)")))

;; Match 2 or 3.
(define_predicate "const_2_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 2, 3)")))

;; Match 4 or 5.
(define_predicate "const_4_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 5)")))

;; Match 4 to 7.
(define_predicate "const_4_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 7)")))

;; Match 6 or 7.
(define_predicate "const_6_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 6, 7)")))

;; Match 8 to 11.
(define_predicate "const_8_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 11)")))

;; Match 12 to 15.
(define_predicate "const_12_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 15)")))

;; Match exactly one bit in a 2-bit mask.
(define_predicate "const_pow2_1_to_2_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 1 || INTVAL (op) == 2")))

;; Match exactly one bit in a 4-bit mask.
(define_predicate "const_pow2_1_to_8_operand"
  (match_code "const_int")
{
  unsigned int log = exact_log2 (INTVAL (op));
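  /* exact_log2 returns -1 for a value that is not a power of two;
     stored in an unsigned that becomes huge, so the comparison below
     also rejects non-powers of two.  */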
  return log <= 3;
})

;; Match exactly one bit in an 8-bit mask.
(define_predicate "const_pow2_1_to_128_operand"
  (match_code "const_int")
{
  unsigned int log = exact_log2 (INTVAL (op));
  return log <= 7;
})

;; Match exactly one bit in a 16-bit mask.
(define_predicate "const_pow2_1_to_32768_operand"
  (match_code "const_int")
{
  unsigned int log = exact_log2 (INTVAL (op));
  return log <= 15;
})

;; True if this is a constant appropriate for an increment or decrement.
(define_predicate "incdec_operand"
  (match_code "const_int")
{
  /* On Pentium4, the inc and dec operations cause an extra dependency on
     the flags register, since the carry flag is not set.  */
  if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
    return 0;
  return op == const1_rtx || op == constm1_rtx;
})

;; True for registers, or 1 or -1.  Used to optimize double-word shifts.
(define_predicate "reg_or_pm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (match_test "op == const1_rtx || op == constm1_rtx"))))

;; True if OP is acceptable as an operand of the DImode shift expander.
(define_predicate "shiftdi_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

(define_predicate "ashldi_input_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "reg_or_pm1_operand")))

;; Return true if OP is a vector load from the constant pool with just
;; the first element nonzero.
(define_predicate "zero_extended_scalar_load_operand"
  (match_code "mem")
{
  unsigned n_elts;
  op = maybe_get_pool_constant (op);

  if (!(op && GET_CODE (op) == CONST_VECTOR))
    return 0;

  n_elts = CONST_VECTOR_NUNITS (op);

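  /* Check that every element after the first is zero; element 0 itself
     is unconstrained.  */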
  for (n_elts--; n_elts > 0; n_elts--)
    {
      rtx elt = CONST_VECTOR_ELT (op, n_elts);
      if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
	return 0;
    }
  return 1;
})

;; Return true if operand is a vector constant that is all ones.
(define_predicate "vector_all_ones_operand"
  (match_code "const_vector")
{
  int nunits = GET_MODE_NUNITS (mode);

  if (GET_CODE (op) == CONST_VECTOR
      && CONST_VECTOR_NUNITS (op) == nunits)
    {
      int i;
      for (i = 0; i < nunits; ++i)
        {
          rtx x = CONST_VECTOR_ELT (op, i);
          if (x != constm1_rtx)
            return 0;
        }
      return 1;
    }

  return 0;
})

;; Return 1 when OP is an operand acceptable for a standard SSE move.
(define_predicate "vector_move_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))

;; Return 1 when OP is a nonimmediate operand or a standard SSE constant.
(define_predicate "nonimmediate_or_sse_const_operand"
  (match_operand 0 "general_operand")
{
  if (nonimmediate_operand (op, mode))
    return 1;
  if (standard_sse_constant_p (op) > 0)
    return 1;
  return 0;
})

;; Return true if OP is a register or zero.
(define_predicate "reg_or_0_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "const0_operand")))

;; Return true if OP is a valid address that does not contain a segment
;; override.
(define_special_predicate "no_seg_address_operand"
  (match_operand 0 "address_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  return parts.seg == SEG_DEFAULT;
})

;; Return nonzero if the rtx is known to be at least 32 bits aligned.
(define_predicate "aligned_operand"
  (match_operand 0 "general_operand")
{
  struct ix86_address parts;
  int ok;

  /* Registers and immediate operands are always "aligned".  */
  if (!MEM_P (op))
    return 1;

  /* All patterns using aligned_operand on memory operands end up
     promoting the memory operand to 64 bits and thus causing a memory
     mismatch stall.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
    return 0;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return 0;

  if (MEM_ALIGN (op) >= 32)
    return 1;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return 1;

  /* Decode the address.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
	return 0;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return 0;
    }
  if (parts.disp)
    {
      if (!CONST_INT_P (parts.disp)
	  || (INTVAL (parts.disp) & 3) != 0)
	return 0;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return 1;
})

;; Returns 1 if OP is a memory operand with a displacement.
(define_predicate "memory_displacement_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);
  return parts.disp != NULL_RTX;
})

;; Returns 1 if OP is a memory operand with a displacement only.
(define_predicate "memory_displacement_only_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT)
    return 0;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base || parts.index)
    return 0;

  return parts.disp != NULL_RTX;
})

;; Returns 1 if OP is a memory operand that needs at most one register,
;; not counting the stack pointer or frame pointer.
(define_predicate "cmpxchg8b_pic_memory_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);
  if (parts.base == NULL_RTX
      || parts.base == arg_pointer_rtx
      || parts.base == frame_pointer_rtx
      || parts.base == hard_frame_pointer_rtx
      || parts.base == stack_pointer_rtx)
    return 1;

  if (parts.index == NULL_RTX
      || parts.index == arg_pointer_rtx
      || parts.index == frame_pointer_rtx
      || parts.index == hard_frame_pointer_rtx
      || parts.index == stack_pointer_rtx)
    return 1;

  return 0;
})


;; Returns 1 if OP is a memory operand whose address cannot be encoded
;; by the modRM byte alone.
(define_predicate "long_memory_operand"
  (and (match_operand 0 "memory_operand")
       (match_test "memory_address_length (op) != 0")))

;; Return 1 if OP is a comparison operator that can be issued by fcmov.
(define_predicate "fcmov_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return 0;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* The i387 supports only a limited set of condition codes.  */
  switch (code)
    {
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode
	  || inmode == CCCmode)
	return 1;
      return 0;
    case ORDERED: case UNORDERED:
    case EQ: case NE:
      return 1;
    default:
      return 0;
    }
})

;; Return 1 if OP is a comparison that can be used in the CMPSS/CMPPS insns.
;; The first set are supported directly; the second set can't be done with
;; full IEEE support, i.e. NaNs.

(define_predicate "sse_comparison_operator"
  (match_code "eq,lt,le,unordered,ne,unge,ungt,ordered"))

;; Return 1 if OP is a comparison operator that can be issued by
;; the AVX predicate-generation instructions.
(define_predicate "avx_comparison_float_operator"
  (match_code "ne,eq,ge,gt,le,lt,unordered,ordered,uneq,unge,ungt,unle,unlt,ltgt"))

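;; Return 1 if OP is an equality or signed integer comparison operator.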
(define_predicate "ix86_comparison_int_operator"
  (match_code "ne,eq,ge,gt,le,lt"))

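;; Return 1 if OP is an equality or unsigned integer comparison operator.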
(define_predicate "ix86_comparison_uns_operator"
  (match_code "ne,eq,geu,gtu,leu,ltu"))

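;; Return 1 if OP is an equality comparison, as used by the bt patterns.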
(define_predicate "bt_comparison_operator"
  (match_code "ne,eq"))

;; Return 1 if OP is a valid comparison operator in a valid mode.
(define_predicate "ix86_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    return ix86_trivial_fp_comparison_operator (op, mode);

  switch (code)
    {
    case EQ: case NE:
      return 1;
    case LT: case GE:
      if (inmode == CCmode || inmode == CCGCmode
	  || inmode == CCGOCmode || inmode == CCNOmode)
	return 1;
      return 0;
    case LTU: case GTU: case LEU: case GEU:
      if (inmode == CCmode || inmode == CCCmode)
	return 1;
      return 0;
    case ORDERED: case UNORDERED:
      if (inmode == CCmode)
	return 1;
      return 0;
    case GT: case LE:
      if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
	return 1;
      return 0;
    default:
      return 0;
    }
})

;; Return 1 if OP is a valid comparison operator testing the carry flag
;; to be set.
(define_predicate "ix86_carry_flag_operator"
  (match_code "ltu,lt,unlt,gtu,gt,ungt,le,unle,ge,unge,ltgt,uneq")
{
  enum machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode || inmode == CCFPUmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return 0;
      code = ix86_fp_compare_code_to_integer (code);
    }
  else if (inmode == CCCmode)
    return code == LTU || code == GTU;
  else if (inmode != CCmode)
    return 0;

  return code == LTU;
})
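;; For example, (ltu (reg:CC FLAGS_REG) (const_int 0)) matches, since
;; after a compare, LTU on CCmode tests exactly the carry flag.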

;; Return 1 if this comparison only requires testing one flag bit.
(define_predicate "ix86_trivial_fp_comparison_operator"
  (match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered"))

;; Return 1 if we know how to do this comparison.  Others require
;; testing more than one flag bit, and we let the generic middle-end
;; code do that.
(define_predicate "ix86_fp_comparison_operator"
  (if_then_else (match_test "ix86_fp_comparison_strategy (GET_CODE (op))
                             == IX86_FPCMP_ARITH")
               (match_operand 0 "comparison_operator")
               (match_operand 0 "ix86_trivial_fp_comparison_operator")))

;; Same as above, but for the swapped comparison used in fp_jcc_4_387.
(define_predicate "ix86_swapped_fp_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  enum rtx_code code = GET_CODE (op);
  int ret;

  PUT_CODE (op, swap_condition (code));
  ret = ix86_fp_comparison_operator (op, mode);
  PUT_CODE (op, code);
  return ret;
})

;; Nearly general operand, but accept any const_double, since we wish
;; to be able to drop them into memory rather than have them get pulled
;; into registers.
(define_predicate "cmp_fp_expander_operand"
  (ior (match_code "const_double")
       (match_operand 0 "general_operand")))

;; Return true if this is a valid binary floating-point operation.
(define_predicate "binary_fp_operator"
  (match_code "plus,minus,mult,div"))

;; Return true if this is a multiply operation.
(define_predicate "mult_operator"
  (match_code "mult"))

;; Return true if this is a division operation.
(define_predicate "div_operator"
  (match_code "div"))

;; Return true if this is an int-to-float conversion operation.
(define_predicate "float_operator"
  (match_code "float"))

;; Return true for ARITHMETIC_P.
(define_predicate "arith_or_logical_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
	       mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))

;; Return true for COMMUTATIVE_P.
(define_predicate "commutative_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax"))

;; Return 1 if OP is a binary operator that can be promoted to a wider mode.
(define_predicate "promotable_binary_operator"
  (ior (match_code "plus,and,ior,xor,ashift")
       (and (match_code "mult")
	    (match_test "TARGET_TUNE_PROMOTE_HIMODE_IMUL"))))

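;; Match a COMPARE rtx.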
(define_predicate "compare_operator"
  (match_code "compare"))

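;; Match an absolute value or negation operator.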
(define_predicate "absneg_operator"
  (match_code "abs,neg"))

;; Return 1 if OP is a misaligned memory operand.
(define_predicate "misaligned_operand"
  (and (match_code "mem")
       (match_test "MEM_ALIGN (op) < GET_MODE_ALIGNMENT (mode)")))

;; Return 1 if OP is an emms operation, known to be a PARALLEL.
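;; The loop below checks that, after the initial emms element, the
;; vector consists of clobbers of the eight x87 stack registers
;; (XFmode) followed by clobbers of the eight MMX registers (DImode).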
(define_predicate "emms_operation"
  (match_code "parallel")
{
  unsigned i;

  if (XVECLEN (op, 0) != 17)
    return 0;

  for (i = 0; i < 8; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != XFmode
	  || REGNO (SET_DEST (elt)) != FIRST_STACK_REG + i)
	return 0;

      elt = XVECEXP (op, 0, i+9);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != DImode
	  || REGNO (SET_DEST (elt)) != FIRST_MMX_REG + i)
	return 0;
    }
  return 1;
})

;; Return 1 if OP is a vzeroall operation, known to be a PARALLEL.
(define_predicate "vzeroall_operation"
  (match_code "parallel")
{
  unsigned i, nregs = TARGET_64BIT ? 16 : 8;

  if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
    return 0;

  for (i = 0; i < nregs; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != V8SImode
	  || REGNO (SET_DEST (elt)) != SSE_REGNO (i)
	  || SET_SRC (elt) != CONST0_RTX (V8SImode))
	return 0;
    }
  return 1;
})

;; Return 1 if OP is a vzeroupper operation, known to be a PARALLEL.
(define_predicate "vzeroupper_operation"
  (match_code "parallel")
{
  unsigned i, nregs = TARGET_64BIT ? 16 : 8;

  if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
    return 0;

  for (i = 0; i < nregs; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      if (GET_CODE (elt) != CLOBBER
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != V8SImode
	  || REGNO (SET_DEST (elt)) != SSE_REGNO (i))
	return 0;
    }
  return 1;
})

;; Return 1 if OP is a parallel for a vpermilp[ds] permute.
;; ??? It would be much easier if the PARALLEL for a VEC_SELECT
;; had a mode, but it doesn't.  So we have 4 copies and install
;; the mode by hand.

(define_predicate "avx_vpermilp_v8sf_operand"
  (and (match_code "parallel")
       (match_test "avx_vpermilp_parallel (op, V8SFmode)")))

(define_predicate "avx_vpermilp_v4df_operand"
  (and (match_code "parallel")
       (match_test "avx_vpermilp_parallel (op, V4DFmode)")))

(define_predicate "avx_vpermilp_v4sf_operand"
  (and (match_code "parallel")
       (match_test "avx_vpermilp_parallel (op, V4SFmode)")))

(define_predicate "avx_vpermilp_v2df_operand"
  (and (match_code "parallel")
       (match_test "avx_vpermilp_parallel (op, V2DFmode)")))

;; Return 1 if OP is a parallel for a vperm2f128 permute.

(define_predicate "avx_vperm2f128_v8sf_operand"
  (and (match_code "parallel")
       (match_test "avx_vperm2f128_parallel (op, V8SFmode)")))

(define_predicate "avx_vperm2f128_v8si_operand"
  (and (match_code "parallel")
       (match_test "avx_vperm2f128_parallel (op, V8SImode)")))

(define_predicate "avx_vperm2f128_v4df_operand"
  (and (match_code "parallel")
       (match_test "avx_vperm2f128_parallel (op, V4DFmode)")))

;; Return 1 if OP is a parallel for a vbroadcast permute.

(define_predicate "avx_vbroadcast_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  rtx elt = XVECEXP (op, 0, 0);
  int i, nelt = XVECLEN (op, 0);

  /* Don't bother checking there are the right number of operands,
     merely that they're all identical.  */
  for (i = 1; i < nelt; ++i)
    if (XVECEXP (op, 0, i) != elt)
      return false;
  return true;
})