//===- X86InstrCompiler.td - Compiler Pseudos and Patterns -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the various pseudo instructions used by the compiler,
// as well as Pat patterns used during instruction selection.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pattern Matching Support

def GetLo32XForm : SDNodeXForm<imm, [{
  // Transformation function: get the low 32 bits.
  return getI32Imm((uint32_t)N->getZExtValue(), SDLoc(N));
}]>;
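// For example, applied to the i64 immediate 0xAAAABBBB00000005, this
// transformation yields the i32 immediate 0x00000005.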


//===----------------------------------------------------------------------===//
// Random Pseudo Instructions.

// PIC base construction.  This expands to code that looks like this:
//     call  $next_inst
//     popl %destreg
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP],
    SchedRW = [WriteJump] in
  def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
                      "", []>;

// ADJCALLSTACKDOWN/UP implicitly use/def ESP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[NotLP64]>;
def ADJCALLSTACKUP32   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[NotLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
       (ADJCALLSTACKDOWN32 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[NotLP64]>;


// ADJCALLSTACKDOWN/UP implicitly use/def RSP because they may be expanded into
// a stack adjustment and the codegen must know that they may modify the stack
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP], SchedRW = [WriteALU] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
                           (ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
                           "#ADJCALLSTACKDOWN", []>, Requires<[IsLP64]>;
def ADJCALLSTACKUP64   : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2),
                           "#ADJCALLSTACKUP",
                           [(X86callseq_end timm:$amt1, timm:$amt2)]>,
                           Requires<[IsLP64]>;
}
def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
        (ADJCALLSTACKDOWN64 i32imm:$amt1, i32imm:$amt2, 0)>, Requires<[IsLP64]>;

let SchedRW = [WriteSystem] in {

// x86-64 va_start lowering magic.
let hasSideEffects = 1, mayStore = 1, Defs = [EFLAGS] in {
def VASTART_SAVE_XMM_REGS : I<0, Pseudo,
                              (outs),
                              (ins GR8:$al, i8mem:$regsavefi, variable_ops),
                              "#VASTART_SAVE_XMM_REGS $al, $regsavefi",
                              [(X86vastart_save_xmm_regs GR8:$al, addr:$regsavefi)]>;
}

let usesCustomInserter = 1, Defs = [EFLAGS] in {
// The VAARG_64 and VAARG_X32 pseudo-instructions take the address of the
// va_list, and place the address of the next argument into a register.
let Defs = [EFLAGS] in {
def VAARG_64 : I<0, Pseudo,
                 (outs GR64:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_64 $dst, $ap, $size, $mode, $align",
                 [(set GR64:$dst,
                    (X86vaarg64 addr:$ap, timm:$size, timm:$mode, timm:$align))]>,
               Requires<[In64BitMode, IsLP64]>;
def VAARG_X32 : I<0, Pseudo,
                 (outs GR32:$dst),
                 (ins i8mem:$ap, i32imm:$size, i8imm:$mode, i32imm:$align),
                 "#VAARG_X32 $dst, $ap, $size, $mode, $align",
                 [(set GR32:$dst,
                    (X86vaargx32 addr:$ap, timm:$size, timm:$mode, timm:$align))]>,
                Requires<[In64BitMode, NotLP64]>;
}

// When using segmented stacks, these are lowered into instructions which first
// check if the current stacklet has enough free memory. If it does, memory is
// allocated by bumping the stack pointer. Otherwise, memory is allocated from
// the heap.
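// Roughly, the custom inserter emits a compare of the bumped stack pointer
// against the stack limit kept in thread-local storage, a conditional branch
// to a block that calls __morestack_allocate_stack_space when the stacklet is
// too small, and a fall-through block that simply commits the new stack
// pointer (register choices and the exact limit location vary by target).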

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def SEG_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR32:$dst,
                         (X86SegAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def SEG_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca for segmented stacks",
                      [(set GR64:$dst,
                         (X86SegAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;

// To protect against stack clash, dynamic allocation should perform a memory
// probe at each page.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def PROBED_ALLOCA_32 : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$size),
                      "# variable sized alloca with probing",
                      [(set GR32:$dst,
                         (X86ProbedAlloca GR32:$size))]>,
                    Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def PROBED_ALLOCA_64 : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$size),
                      "# variable sized alloca with probing",
                      [(set GR64:$dst,
                         (X86ProbedAlloca GR64:$size))]>,
                    Requires<[In64BitMode]>;
}

let hasNoSchedulingInfo = 1 in
def STACKALLOC_W_PROBING : I<0, Pseudo, (outs), (ins i64imm:$stacksize),
                             "# fixed size alloca with probing",
                             []>;

// Dynamic stack allocation yields a _chkstk or _alloca call for all Windows
// targets.  These calls are needed to probe the stack when allocating more than
// 4K bytes in one go. Touching the stack at 4K increments is necessary to
// ensure that the guard pages used by the OS virtual memory manager are
// allocated in the correct sequence.
// The main point of having a separate instruction is the extra unmodelled
// effects (compared to ordinary calls), such as the change of the stack pointer.

let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in
def DYN_ALLOCA_32 : I<0, Pseudo, (outs), (ins GR32:$size),
                     "# dynamic stack allocation",
                     [(X86DynAlloca GR32:$size)]>,
                     Requires<[NotLP64]>;

let Defs = [RAX, RSP, EFLAGS], Uses = [RSP] in
def DYN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
                     "# dynamic stack allocation",
                     [(X86DynAlloca GR64:$size)]>,
                     Requires<[In64BitMode]>;
} // SchedRW

// These instructions XOR the frame pointer into a GPR. They are used in some
// stack protection schemes. These are post-RA pseudos because we only know the
// frame register after register allocation.
let Constraints = "$src = $dst", isMoveImm = 1, isPseudo = 1, Defs = [EFLAGS] in {
  def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                  "xorl\t$$FP, $src", []>,
                  Requires<[NotLP64]>, Sched<[WriteALU]>;
  def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
                  "xorq\t$$FP, $src", []>,
                  Requires<[In64BitMode]>, Sched<[WriteALU]>;
}

//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
//
let SchedRW = [WriteSystem] in {
let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),
                    "ret\t#eh_return, addr: $addr",
                    [(X86ehret GR32:$addr)]>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, isReturn = 1, isBarrier = 1,
    hasCtrlDep = 1, isCodeGenOnly = 1 in {
def EH_RETURN64   : I<0xC3, RawFrm, (outs), (ins GR64:$addr),
                     "ret\t#eh_return, addr: $addr",
                     [(X86ehret GR64:$addr)]>, Sched<[WriteJumpLd]>;

}

let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1 in {
  def CLEANUPRET : I<0, Pseudo, (outs), (ins), "# CLEANUPRET",
                     [(cleanupret bb)]>;

  // CATCHRET needs a custom inserter for SEH.
  let usesCustomInserter = 1 in
    def CATCHRET : I<0, Pseudo, (outs), (ins brtarget32:$dst, brtarget32:$from),
                     "# CATCHRET",
                     [(catchret bb:$dst, bb:$from)]>;
}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
  def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                            "#EH_SJLJ_LONGJMP32",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[Not64BitMode]>;
  def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                            "#EH_SJLJ_LONGJMP64",
                            [(X86eh_sjlj_longjmp addr:$buf)]>,
                          Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pseudo instructions used by unwind info.
//

// Prolog instructions should not be duplicated, since this can cause issues
// because 1) if only some of the instructions are duplicated, then we will
// observe prolog instructions after the end-prolog instruction and 2) Windows
// expects there to only be a single prolog (e.g., when checking if unwinding
// is happening in the middle of a prolog).
let isPseudo = 1, isMeta = 1, isNotDuplicable = 1, SchedRW = [WriteSystem] in {
  def SEH_PushReg : I<0, Pseudo, (outs), (ins i32imm:$reg),
                            "#SEH_PushReg $reg", []>;
  def SEH_SaveReg : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                            "#SEH_SaveReg $reg, $dst", []>;
  def SEH_SaveXMM : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$dst),
                            "#SEH_SaveXMM $reg, $dst", []>;
  def SEH_StackAlloc : I<0, Pseudo, (outs), (ins i32imm:$size),
                            "#SEH_StackAlloc $size", []>;
  def SEH_StackAlign : I<0, Pseudo, (outs), (ins i32imm:$align),
                            "#SEH_StackAlign $align", []>;
  def SEH_SetFrame : I<0, Pseudo, (outs), (ins i32imm:$reg, i32imm:$offset),
                            "#SEH_SetFrame $reg, $offset", []>;
  def SEH_PushFrame : I<0, Pseudo, (outs), (ins i1imm:$mode),
                            "#SEH_PushFrame $mode", []>;
  def SEH_EndPrologue : I<0, Pseudo, (outs), (ins),
                            "#SEH_EndPrologue", []>;
}

// Epilog instructions:
let isPseudo = 1, isMeta = 1, SchedRW = [WriteSystem] in {
  def SEH_Epilogue : I<0, Pseudo, (outs), (ins),
                            "#SEH_Epilogue", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by KCFI.
//===----------------------------------------------------------------------===//
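// KCFI_CHECK is expanded very late (by the asm printer) into a compare of the
// KCFI type hash stored in front of the indirect-call target against the
// expected $type, trapping on mismatch; per the Defs below it may clobber
// R10, R11 and EFLAGS.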
let Defs = [R10, R11, EFLAGS] in {
def KCFI_CHECK : PseudoI<
  (outs), (ins GR64:$ptr, i32imm:$type), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by address sanitizer.
//===----------------------------------------------------------------------===//
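// ASAN_CHECK_MEMACCESS is lowered to a call into an AddressSanitizer runtime
// helper selected by the $accessinfo immediate (which encodes the access kind
// and size); the address is constrained to GR64PLTSafe so that a call through
// the PLT cannot clobber it.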
let Defs = [R10, R11, EFLAGS] in {
def ASAN_CHECK_MEMACCESS : PseudoI<
  (outs), (ins GR64PLTSafe:$addr, i32imm:$accessinfo),
  [(int_asan_check_memaccess GR64PLTSafe:$addr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

// This is lowered into a RET instruction by MCInstLower.  We need
// this so that we don't have to have a MachineBasicBlock which ends
// with a RET and also has successors.
let isPseudo = 1, SchedRW = [WriteJumpLd] in {
def MORESTACK_RET: I<0, Pseudo, (outs), (ins), "", []>;

// This instruction is lowered to a RET followed by a MOV.  The two
// instructions are not generated on a higher level since then the
// verifier sees a MachineBasicBlock ending with a non-terminator.
def MORESTACK_RET_RESTORE_R10 : I<0, Pseudo, (outs), (ins), "", []>;
}

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instruction mapping movr0 to xor.
// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, isMoveImm = 1, AddedComplexity = 10 in
def MOV32r0  : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                 [(set GR32:$dst, 0)]>, Sched<[WriteZero]>;

// Other widths can also make use of the 32-bit xor, which may have a smaller
// encoding and avoid partial register updates.
let AddedComplexity = 10 in {
def : Pat<(i8 0), (EXTRACT_SUBREG (MOV32r0), sub_8bit)>;
def : Pat<(i16 0), (EXTRACT_SUBREG (MOV32r0), sub_16bit)>;
def : Pat<(i64 0), (SUBREG_TO_REG (i64 0), (MOV32r0), sub_32bit)>;
}

let Predicates = [OptForSize, Not64BitMode],
    AddedComplexity = 10 in {
  let SchedRW = [WriteALU] in {
  // Pseudo instructions for materializing 1 and -1 using XOR+INC/DEC,
  // which only require 3 bytes compared to MOV32ri which requires 5.
  let Defs = [EFLAGS], isReMaterializable = 1, isPseudo = 1 in {
    def MOV32r1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                        [(set GR32:$dst, 1)]>;
    def MOV32r_1 : I<0, Pseudo, (outs GR32:$dst), (ins), "",
                        [(set GR32:$dst, -1)]>;
  }
  } // SchedRW

  // MOV16ri is 4 bytes, so the instructions above are smaller.
  def : Pat<(i16 1), (EXTRACT_SUBREG (MOV32r1), sub_16bit)>;
  def : Pat<(i16 -1), (EXTRACT_SUBREG (MOV32r_1), sub_16bit)>;
}

let isReMaterializable = 1, isPseudo = 1, AddedComplexity = 5,
    SchedRW = [WriteALU] in {
// AddedComplexity higher than MOV64ri but lower than MOV32r0 and MOV32r1.
def MOV32ImmSExti8 : I<0, Pseudo, (outs GR32:$dst), (ins i32i8imm:$src), "",
                       [(set GR32:$dst, i32immSExt8:$src)]>,
                       Requires<[OptForMinSize, NotWin64WithoutFP]>;
def MOV64ImmSExti8 : I<0, Pseudo, (outs GR64:$dst), (ins i64i8imm:$src), "",
                       [(set GR64:$dst, i64immSExt8:$src)]>,
                       Requires<[OptForMinSize, NotWin64WithoutFP]>;
}

// Materialize i64 constant where top 32-bits are zero. This could theoretically
// use MOV32ri with a SUBREG_TO_REG to represent the zero-extension; however,
// that would make it more difficult to rematerialize.
let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
    isPseudo = 1, SchedRW = [WriteMove] in
def MOV32ri64 : I<0, Pseudo, (outs GR64:$dst), (ins i64i32imm:$src), "",
                  [(set GR64:$dst, i64immZExt32:$src)]>;

// This 64-bit pseudo-move can also be used for labels in the x86-64 small code
// model.
def mov64imm32 : ComplexPattern<i64, 1, "selectMOV64Imm32", [X86Wrapper]>;
def : Pat<(i64 mov64imm32:$src), (MOV32ri64 mov64imm32:$src)>;

// Use sbb to materialize carry bit.
let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, SchedRW = [WriteADC],
    hasSideEffects = 0 in {
// FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
// However, Pat<> can't replicate the destination reg into the inputs of the
// result.
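// When expanded, SETB_C32r/SETB_C64r become "sbb reg, reg" with the same
// register as source and destination, yielding all-ones if the carry flag is
// set and zero otherwise.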
def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", []>;
def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", []>;
} // Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1, hasSideEffects = 0

//===----------------------------------------------------------------------===//
// String Pseudo Instructions
//
let SchedRW = [WriteMicrocoded] in {
let Defs = [ECX,EDI,ESI], Uses = [ECX,EDI,ESI], isCodeGenOnly = 1 in {
def REP_MOVSB_32 : I<0xA4, RawFrm, (outs), (ins),
                    "{rep;movsb (%esi), %es:(%edi)|rep movsb es:[edi], [esi]}",
                    [(X86rep_movs i8)]>, REP, AdSize32,
                   Requires<[NotLP64]>;
def REP_MOVSW_32 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsw (%esi), %es:(%edi)|rep movsw es:[edi], [esi]}",
                    [(X86rep_movs i16)]>, REP, AdSize32, OpSize16,
                   Requires<[NotLP64]>;
def REP_MOVSD_32 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsl (%esi), %es:(%edi)|rep movsd es:[edi], [esi]}",
                    [(X86rep_movs i32)]>, REP, AdSize32, OpSize32,
                   Requires<[NotLP64]>;
def REP_MOVSQ_32 : RI<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsq (%esi), %es:(%edi)|rep movsq es:[edi], [esi]}",
                    [(X86rep_movs i64)]>, REP, AdSize32,
                   Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI,RSI], Uses = [RCX,RDI,RSI], isCodeGenOnly = 1 in {
def REP_MOVSB_64 : I<0xA4, RawFrm, (outs), (ins),
                    "{rep;movsb (%rsi), %es:(%rdi)|rep movsb es:[rdi], [rsi]}",
                    [(X86rep_movs i8)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
def REP_MOVSW_64 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsw (%rsi), %es:(%rdi)|rep movsw es:[rdi], [rsi]}",
                    [(X86rep_movs i16)]>, REP, AdSize64, OpSize16,
                   Requires<[IsLP64]>;
def REP_MOVSD_64 : I<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsl (%rsi), %es:(%rdi)|rep movsd es:[rdi], [rsi]}",
                    [(X86rep_movs i32)]>, REP, AdSize64, OpSize32,
                   Requires<[IsLP64]>;
def REP_MOVSQ_64 : RI<0xA5, RawFrm, (outs), (ins),
                    "{rep;movsq (%rsi), %es:(%rdi)|rep movsq es:[rdi], [rsi]}",
                    [(X86rep_movs i64)]>, REP, AdSize64,
                   Requires<[IsLP64]>;
}

// FIXME: Should use "(X86rep_stos AL)" as the pattern.
let Defs = [ECX,EDI], isCodeGenOnly = 1 in {
  let Uses = [AL,ECX,EDI] in
  def REP_STOSB_32 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%edi)|rep stosb es:[edi], al}",
                      [(X86rep_stos i8)]>, REP, AdSize32,
                     Requires<[NotLP64]>;
  let Uses = [AX,ECX,EDI] in
  def REP_STOSW_32 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosw %ax, %es:(%edi)|rep stosw es:[edi], ax}",
                      [(X86rep_stos i16)]>, REP, AdSize32, OpSize16,
                     Requires<[NotLP64]>;
  let Uses = [EAX,ECX,EDI] in
  def REP_STOSD_32 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosl %eax, %es:(%edi)|rep stosd es:[edi], eax}",
                      [(X86rep_stos i32)]>, REP, AdSize32, OpSize32,
                     Requires<[NotLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_32 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%edi)|rep stosq es:[edi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize32,
                        Requires<[NotLP64, In64BitMode]>;
}

let Defs = [RCX,RDI], isCodeGenOnly = 1 in {
  let Uses = [AL,RCX,RDI] in
  def REP_STOSB_64 : I<0xAA, RawFrm, (outs), (ins),
                       "{rep;stosb %al, %es:(%rdi)|rep stosb es:[rdi], al}",
                       [(X86rep_stos i8)]>, REP, AdSize64,
                       Requires<[IsLP64]>;
  let Uses = [AX,RCX,RDI] in
  def REP_STOSW_64 : I<0xAB, RawFrm, (outs), (ins),
                       "{rep;stosw %ax, %es:(%rdi)|rep stosw es:[rdi], ax}",
                       [(X86rep_stos i16)]>, REP, AdSize64, OpSize16,
                       Requires<[IsLP64]>;
  let Uses = [RAX,RCX,RDI] in
  def REP_STOSD_64 : I<0xAB, RawFrm, (outs), (ins),
                      "{rep;stosl %eax, %es:(%rdi)|rep stosd es:[rdi], eax}",
                       [(X86rep_stos i32)]>, REP, AdSize64, OpSize32,
                       Requires<[IsLP64]>;

  let Uses = [RAX,RCX,RDI] in
  def REP_STOSQ_64 : RI<0xAB, RawFrm, (outs), (ins),
                        "{rep;stosq %rax, %es:(%rdi)|rep stosq es:[rdi], rax}",
                        [(X86rep_stos i64)]>, REP, AdSize64,
                        Requires<[IsLP64]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Thread Local Storage Instructions
//
let SchedRW = [WriteSystem] in {

// ELF TLS Support
// All calls clobber the non-callee saved registers. ESP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_addr32",
                  [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[Not64BitMode]>;
def TLS_base_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                  "# TLS_base_addr32",
                  [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                  Requires<[Not64BitMode]>;
}

// All calls clobber the non-callee saved registers. RSP is marked as
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead.
let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
            FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
            ST0, ST1, ST2, ST3, ST4, ST5, ST6, ST7,
            MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
            XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
            XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS, DF],
    Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_addr64",
                  [(X86tlsaddr tls64addr:$sym)]>,
                  Requires<[In64BitMode, IsLP64]>;
def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                   "# TLS_base_addr64",
                  [(X86tlsbaseaddr tls64baseaddr:$sym)]>,
                  Requires<[In64BitMode, IsLP64]>;
def TLS_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_addrX32",
                  [(X86tlsaddr tls32addr:$sym)]>,
                  Requires<[In64BitMode, NotLP64]>;
def TLS_base_addrX32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                   "# TLS_base_addrX32",
                  [(X86tlsbaseaddr tls32baseaddr:$sym)]>,
                  Requires<[In64BitMode, NotLP64]>;
}

// TLSDESC only clobbers EAX and EFLAGS. ESP is marked as a use to prevent
// stack-pointer assignments that appear immediately before calls from
// potentially appearing dead.
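// The emitted code is, roughly, the canonical GNU TLSDESC call sequence; for
// the 64-bit variant: "leaq $sym@tlsdesc(%rip), %rax" followed by
// "call *$sym@tlscall(%rax)".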
let Defs = [EAX, EFLAGS], Uses = [RSP, SSP] in {
  def TLS_desc32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                     "# TLS_desc32", [(X86tlsdesc tls32addr:$sym)]>;
  def TLS_desc64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                     "# TLS_desc64", [(X86tlsdesc tls64addr:$sym)]>;
}

// Darwin TLS Support
// For i386, the address of the thunk is passed on the stack; on return, the
// address of the variable is in %eax.  %ecx is trashed during the function
// call.  All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS, DF],
    Uses = [ESP, SSP],
    usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
                "# TLSCall_32",
                [(X86TLSCall addr:$sym)]>,
                Requires<[Not64BitMode]>;

// For x86_64, the address of the thunk is passed in %rdi, but the
// pseudo directly uses the symbol, so do not add an implicit use of
// %rdi.  The lowering will do the right thing with RDI.
// On return the address of the variable is in %rax.  All other
// registers are preserved.
let Defs = [RAX, EFLAGS, DF],
    Uses = [RSP, SSP],
    usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
                  "# TLSCall_64",
                  [(X86TLSCall addr:$sym)]>,
                  Requires<[In64BitMode]>;
} // SchedRW

//===----------------------------------------------------------------------===//
// Conditional Move Pseudo Instructions

// CMOV* - Used to implement the SELECT DAG operation.  Expanded after
// instruction selection into a branch sequence.
multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
  def CMOV#NAME  : I<0, Pseudo,
                    (outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
                    "#CMOV_"#NAME#" PSEUDO!",
                    [(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
                                                EFLAGS)))]>;
}
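// The custom inserter expands each CMOV pseudo into a small diamond of basic
// blocks: roughly, a conditional branch on $cond that skips over the block
// providing the false value, with a PHI in the join block merging $t and $f
// into $dst.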

let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
  // X86 doesn't have 8-bit conditional moves. Use a custom inserter to
  // emit control flow. An alternative to this is to mark i8 SELECT as Promote;
  // however, that requires promoting the operands and can induce additional
  // i8 register pressure.
  defm _GR8 : CMOVrr_PSEUDO<GR8, i8>;

  let Predicates = [NoCMOV] in {
    defm _GR32 : CMOVrr_PSEUDO<GR32, i32>;
    defm _GR16 : CMOVrr_PSEUDO<GR16, i16>;
  } // Predicates = [NoCMOV]
580
581  // fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no
582  // SSE1/SSE2.
583  let Predicates = [FPStackf32] in
584    defm _RFP32 : CMOVrr_PSEUDO<RFP32, f32>;
585
586  let Predicates = [FPStackf64] in
587    defm _RFP64 : CMOVrr_PSEUDO<RFP64, f64>;
588
589  defm _RFP80 : CMOVrr_PSEUDO<RFP80, f80>;
590
591  let Predicates = [HasMMX] in
592    defm _VR64   : CMOVrr_PSEUDO<VR64, x86mmx>;
593
594  let Predicates = [HasSSE1,NoAVX512] in
595    defm _FR32   : CMOVrr_PSEUDO<FR32, f32>;
596  let Predicates = [HasSSE2,NoAVX512] in {
597    defm _FR16   : CMOVrr_PSEUDO<FR16, f16>;
598    defm _FR64   : CMOVrr_PSEUDO<FR64, f64>;
599  }
600  let Predicates = [HasAVX512] in {
601    defm _FR16X  : CMOVrr_PSEUDO<FR16X, f16>;
602    defm _FR32X  : CMOVrr_PSEUDO<FR32X, f32>;
603    defm _FR64X  : CMOVrr_PSEUDO<FR64X, f64>;
604  }
605  let Predicates = [NoVLX] in {
606    defm _VR128  : CMOVrr_PSEUDO<VR128, v2i64>;
607    defm _VR256  : CMOVrr_PSEUDO<VR256, v4i64>;
608  }
609  let Predicates = [HasVLX] in {
610    defm _VR128X : CMOVrr_PSEUDO<VR128X, v2i64>;
611    defm _VR256X : CMOVrr_PSEUDO<VR256X, v4i64>;
612  }
613  defm _VR512  : CMOVrr_PSEUDO<VR512, v8i64>;
614  defm _VK1    : CMOVrr_PSEUDO<VK1,  v1i1>;
615  defm _VK2    : CMOVrr_PSEUDO<VK2,  v2i1>;
616  defm _VK4    : CMOVrr_PSEUDO<VK4,  v4i1>;
617  defm _VK8    : CMOVrr_PSEUDO<VK8,  v8i1>;
618  defm _VK16   : CMOVrr_PSEUDO<VK16, v16i1>;
619  defm _VK32   : CMOVrr_PSEUDO<VK32, v32i1>;
620  defm _VK64   : CMOVrr_PSEUDO<VK64, v64i1>;
621} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]
622
623def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
624          (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
625
626let Predicates = [NoVLX] in {
627  def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
628            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
629  def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
630            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
631  def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
632            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
633  def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
634            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
635  def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
636            (CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
637
638  def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
639            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
640  def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
641            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
642  def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
643            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
644  def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
645            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
646  def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
647            (CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
648}
649let Predicates = [HasVLX] in {
650  def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
651            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
652  def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
653            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
654  def : Pat<(v8f16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
655            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
656  def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
657            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
658  def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
659            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
660  def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
661            (CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
662
663  def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
664            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
665  def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
666            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
667  def : Pat<(v16f16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
668            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
669  def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
670            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
671  def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
672            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
673  def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
674            (CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
675}
676
677def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
678          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
679def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
680          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
681def : Pat<(v32f16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
682          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
683def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
684          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
685def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
686          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
687def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
688          (CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
689
690//===----------------------------------------------------------------------===//
691// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
692//===----------------------------------------------------------------------===//
693
694// FIXME: Use normal instructions and add lock prefix dynamically.
695
696// Memory barriers
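// A locked "or $0, (mem)" acts as a full memory barrier here; it serves as a
// substitute for MFENCE (e.g. on targets where MFENCE is unavailable).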

let isCodeGenOnly = 1, Defs = [EFLAGS] in
def OR32mi8Locked  : Ii8<0x83, MRM1m, (outs), (ins i32mem:$dst, i32i8imm:$zero),
                         "or{l}\t{$zero, $dst|$dst, $zero}", []>,
                         Requires<[Not64BitMode]>, OpSize32, LOCK,
                         Sched<[WriteALURMW]>;

// RegOpc corresponds to the mr version of the instruction
// ImmOpc corresponds to the mi version of the instruction
// ImmOpc8 corresponds to the mi8 version of the instruction
// ImmMod corresponds to the instruction format of the mi and mi8 versions
multiclass LOCK_ArithBinOp<bits<8> RegOpc, bits<8> ImmOpc, bits<8> ImmOpc8,
                           Format ImmMod, SDNode Op, string mnemonic> {
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW] in {

def NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                  RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 },
                  MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2),
                  !strconcat(mnemonic, "{b}\t",
                             "{$src2, $dst|$dst, $src2}"),
                  [(set EFLAGS, (Op addr:$dst, GR8:$src2))]>, LOCK;

def NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2),
                   !strconcat(mnemonic, "{w}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR16:$src2))]>,
                   OpSize16, LOCK;

def NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                   RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                   MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2),
                   !strconcat(mnemonic, "{l}\t",
                              "{$src2, $dst|$dst, $src2}"),
                   [(set EFLAGS, (Op addr:$dst, GR32:$src2))]>,
                   OpSize32, LOCK;

def NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4},
                    RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 },
                    MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
                    !strconcat(mnemonic, "{q}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [(set EFLAGS, (Op addr:$dst, GR64:$src2))]>, LOCK;

// NOTE: These are order-specific; we want the mi8 forms to be listed
// first so that they are slightly preferred to the mi forms.
def NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16i8imm :$src2),
                      !strconcat(mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i16immSExt8:$src2))]>,
                      OpSize16, LOCK;

def NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                      ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2),
                      !strconcat(mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, i32immSExt8:$src2))]>,
                      OpSize32, LOCK;

def NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4},
                       ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 },
                       ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2),
                       !strconcat(mnemonic, "{q}\t",
                                  "{$src2, $dst|$dst, $src2}"),
                       [(set EFLAGS, (Op addr:$dst, i64immSExt8:$src2))]>,
                       LOCK;

def NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                    ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 },
                    ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2),
                    !strconcat(mnemonic, "{b}\t",
                               "{$src2, $dst|$dst, $src2}"),
                    [(set EFLAGS, (Op addr:$dst, (i8 imm:$src2)))]>, LOCK;

def NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2),
                      !strconcat(mnemonic, "{w}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, (i16 imm:$src2)))]>,
                      OpSize16, LOCK;

def NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                      ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                      ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2),
                      !strconcat(mnemonic, "{l}\t",
                                 "{$src2, $dst|$dst, $src2}"),
                      [(set EFLAGS, (Op addr:$dst, (i32 imm:$src2)))]>,
                      OpSize32, LOCK;

def NAME#64mi32 : RIi32S<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4},
                          ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 },
                          ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2),
                          !strconcat(mnemonic, "{q}\t",
                                     "{$src2, $dst|$dst, $src2}"),
                          [(set EFLAGS, (Op addr:$dst, i64immSExt32:$src2))]>,
                          LOCK;
}

}

defm LOCK_ADD : LOCK_ArithBinOp<0x00, 0x80, 0x83, MRM0m, X86lock_add, "add">;
defm LOCK_SUB : LOCK_ArithBinOp<0x28, 0x80, 0x83, MRM5m, X86lock_sub, "sub">;
defm LOCK_OR  : LOCK_ArithBinOp<0x08, 0x80, 0x83, MRM1m, X86lock_or , "or">;
defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, X86lock_and, "and">;
defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, X86lock_xor, "xor">;

let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
    SchedRW = [WriteALURMW]  in {
  let Predicates = [UseIncDec] in {
    def LOCK_INC8m  : I<0xFE, MRM0m, (outs), (ins i8mem :$dst),
                        "inc{b}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst),
                        "inc{w}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst),
                        "inc{l}\t$dst",
                        [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;

    def LOCK_DEC8m  : I<0xFE, MRM1m, (outs), (ins i8mem :$dst),
                        "dec{b}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i8 1)))]>,
                        LOCK;
    def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst),
                        "dec{w}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i16 1)))]>,
                        OpSize16, LOCK;
    def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst),
                        "dec{l}\t$dst",
                        [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i32 1)))]>,
                        OpSize32, LOCK;
  }

  let Predicates = [UseIncDec, In64BitMode] in {
    def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst),
                         "inc{q}\t$dst",
                         [(set EFLAGS, (X86lock_add_nocf addr:$dst, (i64 1)))]>,
                         LOCK;
    def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst),
                         "dec{q}\t$dst",
                         [(set EFLAGS, (X86lock_sub_nocf addr:$dst, (i64 1)))]>,
                         LOCK;
  }
}

let Predicates = [UseIncDec] in {
  // Additional patterns for -1 constant.
  def : Pat<(X86lock_add addr:$dst, (i8  -1)), (LOCK_DEC8m  addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i16 -1)), (LOCK_DEC16m addr:$dst)>;
  def : Pat<(X86lock_add addr:$dst, (i32 -1)), (LOCK_DEC32m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i8  -1)), (LOCK_INC8m  addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i16 -1)), (LOCK_INC16m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i32 -1)), (LOCK_INC32m addr:$dst)>;
}

let Predicates = [UseIncDec, In64BitMode] in {
  // Additional patterns for -1 constant.
  def : Pat<(X86lock_add addr:$dst, (i64 -1)), (LOCK_DEC64m addr:$dst)>;
  def : Pat<(X86lock_sub addr:$dst, (i64 -1)), (LOCK_INC64m addr:$dst)>;
}

// Atomic bit test.
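// Profile: one result (the produced EFLAGS value, modeled as i32) and three
// operands: the address, the bit position (an i8 immediate), and the operand
// width in bits (an i32; the instantiations below pass 16, 32 or 64).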
def X86LBTest : SDTypeProfile<1, 3, [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
                                     SDTCisVT<2, i8>, SDTCisVT<3, i32>]>;
def x86bts : SDNode<"X86ISD::LBTS", X86LBTest,
                    [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
def x86btc : SDNode<"X86ISD::LBTC", X86LBTest,
                    [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
def x86btr : SDNode<"X86ISD::LBTR", X86LBTest,
                    [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;

def X86LBTestRM : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>,
                                       SDTCisInt<2>]>;

def x86_rm_bts : SDNode<"X86ISD::LBTS_RM", X86LBTestRM,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
def x86_rm_btc : SDNode<"X86ISD::LBTC_RM", X86LBTestRM,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;
def x86_rm_btr : SDNode<"X86ISD::LBTR_RM", X86LBTestRM,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore, SDNPMemOperand]>;


multiclass ATOMIC_LOGIC_OP<Format Form, string s> {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteBitTestSetRegRMW]  in {
    def 16m : Ii8<0xBA, Form, (outs), (ins i16mem:$src1, i8imm:$src2),
                  !strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
                  [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 16)))]>,
              OpSize16, TB, LOCK;
    def 32m : Ii8<0xBA, Form, (outs), (ins i32mem:$src1, i8imm:$src2),
                  !strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
                  [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 32)))]>,
              OpSize32, TB, LOCK;
    def 64m : RIi8<0xBA, Form, (outs), (ins i64mem:$src1, i8imm:$src2),
                   !strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
                   [(set EFLAGS, (!cast<SDNode>("x86" # s) addr:$src1, timm:$src2, (i32 64)))]>,
              TB, LOCK;
  }
}

multiclass ATOMIC_LOGIC_OP_RM<bits<8> Opc8, string s> {
  let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1,
      SchedRW = [WriteBitTestSetRegRMW]  in {
    def 16rm : I<Opc8, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2),
                  !strconcat(s, "{w}\t{$src2, $src1|$src1, $src2}"),
                  [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR16:$src2))]>,
               OpSize16, TB, LOCK;
    def 32rm : I<Opc8, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2),
                  !strconcat(s, "{l}\t{$src2, $src1|$src1, $src2}"),
                  [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR32:$src2))]>,
               OpSize32, TB, LOCK;
    def 64rm : RI<Opc8, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2),
                   !strconcat(s, "{q}\t{$src2, $src1|$src1, $src2}"),
                   [(set EFLAGS, (!cast<SDNode>("x86_rm_" # s) addr:$src1, GR64:$src2))]>,
               TB, LOCK;
  }
}


defm LOCK_BTS : ATOMIC_LOGIC_OP<MRM5m, "bts">;
defm LOCK_BTC : ATOMIC_LOGIC_OP<MRM7m, "btc">;
defm LOCK_BTR : ATOMIC_LOGIC_OP<MRM6m, "btr">;

defm LOCK_BTS_RM : ATOMIC_LOGIC_OP_RM<0xAB, "bts">;
defm LOCK_BTC_RM : ATOMIC_LOGIC_OP_RM<0xBB, "btc">;
defm LOCK_BTR_RM : ATOMIC_LOGIC_OP_RM<0xB3, "btr">;

// Atomic compare and swap.
multiclass LCMPXCHG_BinOp<bits<8> Opc8, bits<8> Opc, Format Form,
                          string mnemonic, SDPatternOperator frag> {
let isCodeGenOnly = 1, SchedRW = [WriteCMPXCHGRMW] in {
  let Defs = [AL, EFLAGS], Uses = [AL] in
  def NAME#8  : I<Opc8, Form, (outs), (ins i8mem:$ptr, GR8:$swap),
                  !strconcat(mnemonic, "{b}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR8:$swap, 1)]>, TB, LOCK;
  let Defs = [AX, EFLAGS], Uses = [AX] in
  def NAME#16 : I<Opc, Form, (outs), (ins i16mem:$ptr, GR16:$swap),
                  !strconcat(mnemonic, "{w}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR16:$swap, 2)]>, TB, OpSize16, LOCK;
  let Defs = [EAX, EFLAGS], Uses = [EAX] in
  def NAME#32 : I<Opc, Form, (outs), (ins i32mem:$ptr, GR32:$swap),
                  !strconcat(mnemonic, "{l}\t{$swap, $ptr|$ptr, $swap}"),
                  [(frag addr:$ptr, GR32:$swap, 4)]>, TB, OpSize32, LOCK;
  let Defs = [RAX, EFLAGS], Uses = [RAX] in
  def NAME#64 : RI<Opc, Form, (outs), (ins i64mem:$ptr, GR64:$swap),
                   !strconcat(mnemonic, "{q}\t{$swap, $ptr|$ptr, $swap}"),
                   [(frag addr:$ptr, GR64:$swap, 8)]>, TB, LOCK;
}
}

let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX],
    Predicates = [HasCX8], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, usesCustomInserter = 1 in {
def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr),
                   "cmpxchg8b\t$ptr",
                   [(X86cas8 addr:$ptr)]>, TB, LOCK;
}

let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX],
    Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr),
                     "cmpxchg16b\t$ptr",
                     []>, TB, LOCK;
}
971
972// This pseudo must be used when the frame uses RBX as
973// the base pointer. Indeed, in such situation RBX is a reserved
974// register and the register allocator will ignore any use/def of
975// it. In other words, the register will not fix the clobbering of
976// RBX that will happen when setting the arguments for the instrucion.
977//
978// Unlike the actual related instruction, we mark that this one
979// defines RBX (instead of using RBX).
980// The rationale is that we will define RBX during the expansion of
981// the pseudo. The argument feeding RBX is rbx_input.
982//
983// The additional argument, $rbx_save, is a temporary register used to
984// save the value of RBX across the actual instruction.
985//
986// To make sure the register assigned to $rbx_save does not interfere with
987// the definition of the actual instruction, we use a definition $dst which
988// is tied to $rbx_save. That way, the live-range of $rbx_save spans across
989// the instruction and we are sure we will have a valid register to restore
990// the value of RBX.
let Defs = [RAX, RDX, RBX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    Constraints = "$rbx_save = $dst" in {
def LCMPXCHG16B_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins i128mem:$ptr, GR64:$rbx_input, GR64:$rbx_save), "", []>;
}
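// Expansion sketch (simplified):
//   mov %rbx, $rbx_save
//   mov $rbx_input, %rbx
//   cmpxchg16b ($ptr)
//   mov $rbx_save, %rbx
// so RBX holds the desired input only for the duration of the instruction.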

// Pseudo instruction that doesn't read/write RBX. Will be turned into either
// LCMPXCHG16B_SAVE_RBX or LCMPXCHG16B via a custom inserter.
let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RCX, RDX],
    Predicates = [HasCX16,In64BitMode], SchedRW = [WriteCMPXCHGRMW],
    isCodeGenOnly = 1, isPseudo = 1,
    mayLoad = 1, mayStore = 1, hasSideEffects = 0,
    usesCustomInserter = 1 in {
def LCMPXCHG16B_NO_RBX :
    I<0, Pseudo, (outs), (ins i128mem:$ptr, GR64:$rbx_input), "",
      [(X86cas16 addr:$ptr, GR64:$rbx_input)]>;
}

// This pseudo must be used when the frame uses RBX/EBX as
// the base pointer.
// cf comment for LCMPXCHG16B_SAVE_RBX.
let Defs = [EBX], Uses = [ECX, EAX],
    Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1, Constraints = "$rbx_save = $dst" in {
def MWAITX_SAVE_RBX :
    I<0, Pseudo, (outs GR64:$dst),
      (ins GR32:$ebx_input, GR64:$rbx_save),
      "mwaitx",
      []>;
}

// Pseudo mwaitx instruction to use for custom insertion.
let Predicates = [HasMWAITX], SchedRW = [WriteSystem],
    isCodeGenOnly = 1, isPseudo = 1,
    usesCustomInserter = 1 in {
def MWAITX :
    I<0, Pseudo, (outs), (ins GR32:$ecx, GR32:$eax, GR32:$ebx),
      "mwaitx",
      [(int_x86_mwaitx GR32:$ecx, GR32:$eax, GR32:$ebx)]>;
}


defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", X86cas>;

// Atomic exchange and add
multiclass ATOMIC_RMW_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
                            string frag> {
  let Constraints = "$val = $dst", Defs = [EFLAGS], mayLoad = 1, mayStore = 1,
      isCodeGenOnly = 1, SchedRW = [WriteALURMW] in {
    def NAME#8  : I<opc8, MRMSrcMem, (outs GR8:$dst),
                    (ins GR8:$val, i8mem:$ptr),
                    !strconcat(mnemonic, "{b}\t{$val, $ptr|$ptr, $val}"),
                    [(set GR8:$dst,
                          (!cast<PatFrag>(frag # "_i8") addr:$ptr, GR8:$val))]>;
    def NAME#16 : I<opc, MRMSrcMem, (outs GR16:$dst),
                    (ins GR16:$val, i16mem:$ptr),
                    !strconcat(mnemonic, "{w}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR16:$dst,
                       (!cast<PatFrag>(frag # "_i16") addr:$ptr, GR16:$val))]>,
                    OpSize16;
    def NAME#32 : I<opc, MRMSrcMem, (outs GR32:$dst),
                    (ins GR32:$val, i32mem:$ptr),
                    !strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
                    [(set
                       GR32:$dst,
                       (!cast<PatFrag>(frag # "_i32") addr:$ptr, GR32:$val))]>,
                    OpSize32;
    def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
                     (ins GR64:$val, i64mem:$ptr),
                     !strconcat(mnemonic, "{q}\t{$val, $ptr|$ptr, $val}"),
                     [(set
                        GR64:$dst,
                        (!cast<PatFrag>(frag # "_i64") addr:$ptr, GR64:$val))]>;
  }
}

defm LXADD : ATOMIC_RMW_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;
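// Because $val is tied to $dst, "lock xadd" both stores the sum to memory and
// returns the previous memory value in the register, which is exactly the
// semantics of an atomic fetch_add.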

/* The following multiclass tries to make sure that in code like
 *    x.store (immediate op x.load(acquire), release)
 * and
 *    x.store (register op x.load(acquire), release)
 * an operation directly on memory is generated instead of wasting a register.
 * It is not automatic as atomic_store/load are only lowered to MOV instructions
 * extremely late to prevent them from being accidentally reordered in the backend
 * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
 */
multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
  def : Pat<(atomic_store_8 (op (atomic_load_8 addr:$dst), (i8 imm:$src)),
                            addr:$dst),
            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_16 (op (atomic_load_16 addr:$dst), (i16 imm:$src)),
                             addr:$dst),
            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_32 (op (atomic_load_32 addr:$dst), (i32 imm:$src)),
                             addr:$dst),
            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
  def : Pat<(atomic_store_64 (op (atomic_load_64 addr:$dst), (i64immSExt32:$src)),
                             addr:$dst),
            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;
  def : Pat<(atomic_store_8 (op (atomic_load_8 addr:$dst), (i8 GR8:$src)), addr:$dst),
            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
  def : Pat<(atomic_store_16 (op (atomic_load_16 addr:$dst), (i16 GR16:$src)),
                             addr:$dst),
            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
  def : Pat<(atomic_store_32 (op (atomic_load_32 addr:$dst), (i32 GR32:$src)),
                             addr:$dst),
            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
  def : Pat<(atomic_store_64 (op (atomic_load_64 addr:$dst), (i64 GR64:$src)),
                             addr:$dst),
            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
}
defm : RELEASE_BINOP_MI<"ADD", add>;
defm : RELEASE_BINOP_MI<"AND", and>;
defm : RELEASE_BINOP_MI<"OR",  or>;
defm : RELEASE_BINOP_MI<"XOR", xor>;
defm : RELEASE_BINOP_MI<"SUB", sub>;
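// For example, "x.store(x.load(acquire) + 1, release)" on an i32 selects a
// single memory-destination ADD32mi instead of a load/add/store sequence.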

// Atomic load + floating point patterns.
// FIXME: This could also handle SIMD operations with *ps and *pd instructions.
multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>(Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseSSE1]>;
  def : Pat<(op FR32:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSrm") FR32:$src1, addr:$src2)>,
            Requires<[UseAVX]>;
  def : Pat<(op FR32X:$src1, (bitconvert (i32 (atomic_load_32 addr:$src2)))),
            (!cast<Instruction>("V"#Name#"SSZrm") FR32X:$src1, addr:$src2)>,
            Requires<[HasAVX512]>;

  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
            (!cast<Instruction>(Name#"SDrm") FR64:$src1, addr:$src2)>,
1129            Requires<[UseSSE1]>;
1130  def : Pat<(op FR64:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
1131            (!cast<Instruction>("V"#Name#"SDrm") FR64:$src1, addr:$src2)>,
1132            Requires<[UseAVX]>;
1133  def : Pat<(op FR64X:$src1, (bitconvert (i64 (atomic_load_64 addr:$src2)))),
1134            (!cast<Instruction>("V"#Name#"SDZrm") FR64X:$src1, addr:$src2)>,
1135            Requires<[HasAVX512]>;
1136}
1137defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
1138defm : ATOMIC_LOAD_FP_BINOP_MI<"SUB", fsub>;
1139defm : ATOMIC_LOAD_FP_BINOP_MI<"MUL", fmul>;
1140defm : ATOMIC_LOAD_FP_BINOP_MI<"DIV", fdiv>;
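
// A hedged example of what the FP patterns above enable: for code shaped like
//   float f(float a, std::atomic<uint32_t> &bits) {
//     return a + std::bit_cast<float>(bits.load(std::memory_order_relaxed));
//   }
// the atomic i32 load can fold straight into "addss (mem), %xmm0" (or its
// VEX/EVEX forms), avoiding a separate integer load plus a GPR-to-XMM move.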

multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
                        dag dag64> {
  def : Pat<(atomic_store_8 dag8, addr:$dst),
            (!cast<Instruction>(Name#8m) addr:$dst)>;
  def : Pat<(atomic_store_16 dag16, addr:$dst),
            (!cast<Instruction>(Name#16m) addr:$dst)>;
  def : Pat<(atomic_store_32 dag32, addr:$dst),
            (!cast<Instruction>(Name#32m) addr:$dst)>;
  def : Pat<(atomic_store_64 dag64, addr:$dst),
            (!cast<Instruction>(Name#64m) addr:$dst)>;
}

let Predicates = [UseIncDec] in {
  defm : RELEASE_UNOP<"INC",
      (add (atomic_load_8  addr:$dst), (i8 1)),
      (add (atomic_load_16 addr:$dst), (i16 1)),
      (add (atomic_load_32 addr:$dst), (i32 1)),
      (add (atomic_load_64 addr:$dst), (i64 1))>;
  defm : RELEASE_UNOP<"DEC",
      (add (atomic_load_8  addr:$dst), (i8 -1)),
      (add (atomic_load_16 addr:$dst), (i16 -1)),
      (add (atomic_load_32 addr:$dst), (i32 -1)),
      (add (atomic_load_64 addr:$dst), (i64 -1))>;
}

defm : RELEASE_UNOP<"NEG",
    (ineg (i8 (atomic_load_8  addr:$dst))),
    (ineg (i16 (atomic_load_16 addr:$dst))),
    (ineg (i32 (atomic_load_32 addr:$dst))),
    (ineg (i64 (atomic_load_64 addr:$dst)))>;
defm : RELEASE_UNOP<"NOT",
    (not (i8 (atomic_load_8  addr:$dst))),
    (not (i16 (atomic_load_16 addr:$dst))),
    (not (i32 (atomic_load_32 addr:$dst))),
    (not (i64 (atomic_load_64 addr:$dst)))>;

def : Pat<(atomic_store_8 (i8 imm:$src), addr:$dst),
          (MOV8mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_16 (i16 imm:$src), addr:$dst),
          (MOV16mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_32 (i32 imm:$src), addr:$dst),
          (MOV32mi addr:$dst, imm:$src)>;
def : Pat<(atomic_store_64 (i64immSExt32:$src), addr:$dst),
          (MOV64mi32 addr:$dst, i64immSExt32:$src)>;

def : Pat<(atomic_store_8 GR8:$src, addr:$dst),
          (MOV8mr addr:$dst, GR8:$src)>;
def : Pat<(atomic_store_16 GR16:$src, addr:$dst),
          (MOV16mr addr:$dst, GR16:$src)>;
def : Pat<(atomic_store_32 GR32:$src, addr:$dst),
          (MOV32mr addr:$dst, GR32:$src)>;
def : Pat<(atomic_store_64 GR64:$src, addr:$dst),
          (MOV64mr addr:$dst, GR64:$src)>;

def : Pat<(i8  (atomic_load_8 addr:$src)),  (MOV8rm addr:$src)>;
def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;
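
// Why plain MOVs suffice here (informal note): on x86, naturally aligned
// loads and stores up to 64 bits are architecturally atomic, and the strong
// x86-TSO ordering already gives a MOV load acquire semantics and a MOV store
// release semantics, so no LOCK prefix or fence is required for these.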

// Floating point loads/stores.
def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
          (MOVSSmr addr:$dst, FR32:$src)>, Requires<[UseSSE1]>;
def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
          (VMOVSSmr addr:$dst, FR32:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_32 (i32 (bitconvert (f32 FR32:$src))), addr:$dst),
          (VMOVSSZmr addr:$dst, FR32:$src)>, Requires<[HasAVX512]>;

def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
          (MOVSDmr addr:$dst, FR64:$src)>, Requires<[UseSSE2]>;
def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
          (VMOVSDmr addr:$dst, FR64:$src)>, Requires<[UseAVX]>;
def : Pat<(atomic_store_64 (i64 (bitconvert (f64 FR64:$src))), addr:$dst),
          (VMOVSDZmr addr:$dst, FR64:$src)>, Requires<[HasAVX512]>;

def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (MOVSSrm_alt addr:$src)>, Requires<[UseSSE1]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f32 (bitconvert (i32 (atomic_load_32 addr:$src)))),
          (VMOVSSZrm_alt addr:$src)>, Requires<[HasAVX512]>;

def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (MOVSDrm_alt addr:$src)>, Requires<[UseSSE2]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDrm_alt addr:$src)>, Requires<[UseAVX]>;
def : Pat<(f64 (bitconvert (i64 (atomic_load_64 addr:$src)))),
          (VMOVSDZrm_alt addr:$src)>, Requires<[HasAVX512]>;

//===----------------------------------------------------------------------===//
// DAG Pattern Matching Rules
//===----------------------------------------------------------------------===//

// Use AND/OR to store 0/-1 in memory when optimizing for minsize. This saves
// binary size compared to a regular MOV, but it introduces an unnecessary
// load, so it is not suitable for regular or optsize functions.
let Predicates = [OptForMinSize] in {
def : Pat<(simple_store (i16 0), addr:$dst), (AND16mi addr:$dst, 0)>;
def : Pat<(simple_store (i32 0), addr:$dst), (AND32mi addr:$dst, 0)>;
def : Pat<(simple_store (i64 0), addr:$dst), (AND64mi32 addr:$dst, 0)>;
def : Pat<(simple_store (i16 -1), addr:$dst), (OR16mi addr:$dst, -1)>;
def : Pat<(simple_store (i32 -1), addr:$dst), (OR32mi addr:$dst, -1)>;
def : Pat<(simple_store (i64 -1), addr:$dst), (OR64mi32 addr:$dst, -1)>;
}
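
// Approximate size intuition for the block above (hedged): "movl $0, (mem)"
// always carries a 4-byte immediate, while AND/OR can use their sign-extended
// imm8 forms for 0 and -1, saving roughly 3 bytes per store. The price is a
// hidden load of the old value, hence the restriction to OptForMinSize.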

// In kernel code model, we can get the address of a label
// into a register with 'movq'.  FIXME: This is a hack, the 'imm' predicate of
// the MOV64ri32 should accept these.
def : Pat<(i64 (X86Wrapper tconstpool  :$dst)),
          (MOV64ri32 tconstpool  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tjumptable  :$dst)),
          (MOV64ri32 tjumptable  :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
          (MOV64ri32 tglobaladdr :$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
          (MOV64ri32 texternalsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper mcsym:$dst)),
          (MOV64ri32 mcsym:$dst)>, Requires<[KernelCode]>;
def : Pat<(i64 (X86Wrapper tblockaddress:$dst)),
          (MOV64ri32 tblockaddress:$dst)>, Requires<[KernelCode]>;

// In the small code model with the static relocation model, it is safe to
// store global addresses directly as immediates.  FIXME: This is really a
// hack, the 'imm' predicate for MOV64mi32 should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tconstpool:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tjumptable:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, texternalsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper mcsym:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, mcsym:$src)>,
          Requires<[NearData, IsNotPIC]>;
def : Pat<(store (i64 (X86Wrapper tblockaddress:$src)), addr:$dst),
          (MOV64mi32 addr:$dst, tblockaddress:$src)>,
          Requires<[NearData, IsNotPIC]>;

def : Pat<(i32 (X86RecoverFrameAlloc mcsym:$dst)), (MOV32ri mcsym:$dst)>;
def : Pat<(i64 (X86RecoverFrameAlloc mcsym:$dst)), (MOV64ri mcsym:$dst)>;

// Calls

// TLS addressing has some special cases here.
// This corresponds to movabs $foo@tpoff, %rax
def : Pat<(i64 (X86Wrapper tglobaltlsaddr :$dst)),
          (MOV64ri32 tglobaltlsaddr :$dst)>;
// This corresponds to add $foo@tpoff, %rax
def : Pat<(add GR64:$src1, (X86Wrapper tglobaltlsaddr :$dst)),
          (ADD64ri32 GR64:$src1, tglobaltlsaddr :$dst)>;


// Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit.
def : Pat<(X86call (i64 tglobaladdr:$dst)),
          (CALL64pcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i64 texternalsym:$dst)),
          (CALL64pcrel32 texternalsym:$dst)>;

def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 texternalsym:$dst)),
          (CALL64pcrel32_RVMARKER tglobaladdr:$rvfunc, texternalsym:$dst)>;
def : Pat<(X86call_rvmarker (i64 tglobaladdr:$rvfunc), (i64 tglobaladdr:$dst)),
          (CALL64pcrel32_RVMARKER tglobaladdr:$rvfunc, tglobaladdr:$dst)>;

// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they
// can never use callee-saved registers. That is the purpose of the GR64_TC
// register classes.
//
// The only volatile register that is never used by the calling convention is
// %r11. This happens when calling a vararg function with 6 arguments.
//
// Match an X86tcret that uses fewer than 7 volatile registers.
def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, NotUseIndirectThunkCalls]>;

// FIXME: This is disabled for 32-bit PIC mode because the global base
// register which is part of the address mode may be assigned a
// callee-saved register.
// Similar to X86tcret_6regs, here we only have 1 register left.
def : Pat<(X86tcret_1reg (load addr:$dst), timm:$off),
          (TCRETURNmi addr:$dst, timm:$off)>,
          Requires<[Not64BitMode, IsNotPIC, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret (i32 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi tglobaladdr:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret (i32 texternalsym:$dst), timm:$off),
          (TCRETURNdi texternalsym:$dst, timm:$off)>,
          Requires<[NotLP64]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (TCRETURNri64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

// Don't fold loads into X86tcret requiring more than 6 regs.
// There wouldn't be enough scratch registers for base+index.
def : Pat<(X86tcret_6regs (load addr:$dst), timm:$off),
          (TCRETURNmi64 addr:$dst, timm:$off)>,
          Requires<[In64BitMode, NotUseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN64 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[In64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret ptr_rc_tailcall:$dst, timm:$off),
          (INDIRECT_THUNK_TCRETURN32 ptr_rc_tailcall:$dst, timm:$off)>,
          Requires<[Not64BitMode, UseIndirectThunkCalls]>;

def : Pat<(X86tcret (i64 tglobaladdr:$dst), timm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, timm:$off)>,
          Requires<[IsLP64]>;

def : Pat<(X86tcret (i64 texternalsym:$dst), timm:$off),
          (TCRETURNdi64 texternalsym:$dst, timm:$off)>,
          Requires<[IsLP64]>;

// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
          (CALLpcrel32 tglobaladdr:$dst)>;
def : Pat<(X86call (i32 texternalsym:$dst)),
          (CALLpcrel32 texternalsym:$dst)>;
def : Pat<(X86call (i32 imm:$dst)),
          (CALLpcrel32 imm:$dst)>, Requires<[CallImmAddr]>;

// Comparisons.

// TEST R,R is smaller than CMP R,0
def : Pat<(X86cmp GR8:$src1, 0),
          (TEST8rr GR8:$src1, GR8:$src1)>;
def : Pat<(X86cmp GR16:$src1, 0),
          (TEST16rr GR16:$src1, GR16:$src1)>;
def : Pat<(X86cmp GR32:$src1, 0),
          (TEST32rr GR32:$src1, GR32:$src1)>;
def : Pat<(X86cmp GR64:$src1, 0),
          (TEST64rr GR64:$src1, GR64:$src1)>;

// zextload bool -> zextload byte
// An i1 is stored in one byte in zero-extended form; the upper bits are
// already cleared before the store.
def : Pat<(zextloadi8i1  addr:$src), (MOV8rm addr:$src)>;
def : Pat<(zextloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
def : Pat<(zextloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;

// extload bool -> extload byte
// When extloading from 16-bit and smaller memory locations into 64-bit
// registers, use zero-extending loads so that the entire 64-bit register is
// defined, avoiding partial-register updates.

def : Pat<(extloadi8i1 addr:$src),   (MOV8rm      addr:$src)>;
def : Pat<(extloadi16i1 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i1 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;
def : Pat<(extloadi32i8 addr:$src),  (MOVZX32rm8  addr:$src)>;
def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;

// For other extloads, use subregs, since the high contents of the register are
// defined after an extload.
// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
// 32-bit loads for 4 byte aligned i8/i16 loads.
def : Pat<(extloadi64i32 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
def : Pat<(extloadi64i1 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i8 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
def : Pat<(extloadi64i16 addr:$src),
          (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
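
// Perf note (informal): preferring movzbl/movzwl over a byte or word mov into
// the low subregister means the whole destination register is written, so the
// load carries no false dependency on the register's previous contents.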

// anyext. Define these to do an explicit zero-extend to
// avoid partial-register updates.
def : Pat<(i16 (anyext GR8 :$src)), (EXTRACT_SUBREG
                                     (MOVZX32rr8 GR8 :$src), sub_16bit)>;
def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8  GR8 :$src)>;

// Except for i16 -> i32, since isel expects i16 ops to be promoted to i32.
def : Pat<(i32 (anyext GR16:$src)),
          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;

def : Pat<(i64 (anyext GR8 :$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr8  GR8  :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR16:$src)),
          (SUBREG_TO_REG (i64 0), (MOVZX32rr16 GR16 :$src), sub_32bit)>;
def : Pat<(i64 (anyext GR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GR32:$src, sub_32bit)>;

def : Pat<(i32 (anyext_sdiv GR8:$src)), (MOVSX32rr8 GR8:$src)>;

// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
def : Pat<(i64 (and (anyext def32:$src), 0x00000000FFFFFFFF)),
          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
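
// Illustrative def32 case (hedged sketch): in
//   uint64_t f(uint32_t a, uint32_t b) { return (uint64_t)(a + b); }
// the 32-bit addl already zeroes bits 63:32 of its destination, so the zext
// is free and no extra "movl %eax, %eax" style move needs to be emitted.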

//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//

// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies.  However, we also
// want to emit these instructions as an OR at the end of the code generator
// to make the generated code easier to read.  To do this, we select into
// "disjoint bits" pseudo ops.

// (or x1, x2) -> (add x1, x2) if the two operands are known not to share bits.
// Try this before selecting to OR.
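// Illustrative payoff (hedged, not taken from any test): for
//   int f(int x) { return (x << 2) | 3; }   // low bits known zero after shl
// selecting the OR as ADD32ri_DB lets the add fold into an LEA such as
//   leal 3(,%rdi,4), %eax
// while a real OR would need a two-address instruction plus a register copy.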
let SchedRW = [WriteALU] in {

let isConvertibleToThreeAddress = 1, isPseudo = 1,
    Constraints = "$src1 = $dst", Defs = [EFLAGS] in {
let isCommutable = 1 in {
def ADD8rr_DB   : I<0, Pseudo, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2),
                    "", // orb/addb REG, REG
                    [(set GR8:$dst, (or_is_add GR8:$src1, GR8:$src2))]>;
def ADD16rr_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
                    "", // orw/addw REG, REG
                    [(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
                    "", // orl/addl REG, REG
                    [(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB  : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
                    "", // orq/addq REG, REG
                    [(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
} // isCommutable

def ADD8ri_DB :   I<0, Pseudo,
                    (outs GR8:$dst), (ins GR8:$src1, i8imm:$src2),
                    "", // orb/addb REG, imm8
                    [(set GR8:$dst, (or_is_add GR8:$src1, imm:$src2))]>;
def ADD16ri_DB  : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2),
                    "", // orw/addw REG, imm
                    [(set GR16:$dst, (or_is_add GR16:$src1, imm:$src2))]>;
def ADD32ri_DB  : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2),
                    "", // orl/addl REG, imm
                    [(set GR32:$dst, (or_is_add GR32:$src1, imm:$src2))]>;
def ADD64ri32_DB : I<0, Pseudo,
                     (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2),
                     "", // orq/addq REG, imm
                     [(set GR64:$dst, (or_is_add GR64:$src1,
                                                 i64immSExt32:$src2))]>;
}
} // SchedRW

//===----------------------------------------------------------------------===//
// Pattern match XOR as ADD
//===----------------------------------------------------------------------===//

// Prefer to pattern match XOR with min_signed_value as ADD at isel time.
// ADD can be 3-addressified into an LEA instruction to avoid copies.
let AddedComplexity = 5 in {
def : Pat<(xor GR8:$src1, -128),
          (ADD8ri GR8:$src1, -128)>;
def : Pat<(xor GR16:$src1, -32768),
          (ADD16ri GR16:$src1, -32768)>;
def : Pat<(xor GR32:$src1, -2147483648),
          (ADD32ri GR32:$src1, -2147483648)>;
}
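
// Why xor and add agree here (informal): the addend's low bits are all zero,
// so no carries are generated below the sign bit; adding the minimum signed
// value therefore just flips the sign bit, and the carry out of the top bit
// is discarded. That is exactly what the xor computes.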

//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//

// Odd encoding trick: -128 fits into an 8-bit immediate field while
// +128 doesn't, so in this special case use a sub instead of an add.
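// Encoding sketch (approximate): "addl $128, %eax" cannot use the
// sign-extended imm8 form (+128 is out of range), so it needs a 32-bit
// immediate, e.g. 05 80 00 00 00, while "subl $-128, %eax" encodes as
// 83 E8 80. The X86add_flag_nocf variants below exist because add and sub
// produce different carry flags, so the swap is only valid when CF is unused.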
let Predicates = [NoNDD] in {
  def : Pat<(add GR16:$src1, 128),
            (SUB16ri GR16:$src1, -128)>;
  def : Pat<(add GR32:$src1, 128),
            (SUB32ri GR32:$src1, -128)>;
  def : Pat<(add GR64:$src1, 128),
            (SUB64ri32 GR64:$src1, -128)>;

  def : Pat<(X86add_flag_nocf GR16:$src1, 128),
            (SUB16ri GR16:$src1, -128)>;
  def : Pat<(X86add_flag_nocf GR32:$src1, 128),
            (SUB32ri GR32:$src1, -128)>;
  def : Pat<(X86add_flag_nocf GR64:$src1, 128),
            (SUB64ri32 GR64:$src1, -128)>;
}
let Predicates = [HasNDD] in {
  def : Pat<(add GR16:$src1, 128),
            (SUB16ri_ND GR16:$src1, -128)>;
  def : Pat<(add GR32:$src1, 128),
            (SUB32ri_ND GR32:$src1, -128)>;
  def : Pat<(add GR64:$src1, 128),
            (SUB64ri32_ND GR64:$src1, -128)>;

  def : Pat<(X86add_flag_nocf GR16:$src1, 128),
            (SUB16ri_ND GR16:$src1, -128)>;
  def : Pat<(X86add_flag_nocf GR32:$src1, 128),
            (SUB32ri_ND GR32:$src1, -128)>;
  def : Pat<(X86add_flag_nocf GR64:$src1, 128),
            (SUB64ri32_ND GR64:$src1, -128)>;
}
def : Pat<(store (add (loadi16 addr:$dst), 128), addr:$dst),
          (SUB16mi addr:$dst, -128)>;
def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),
          (SUB32mi addr:$dst, -128)>;
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
          (SUB64mi32 addr:$dst, -128)>;
let Predicates = [HasNDD] in {
  def : Pat<(add (loadi16 addr:$src), 128),
            (SUB16mi_ND addr:$src, -128)>;
  def : Pat<(add (loadi32 addr:$src), 128),
            (SUB32mi_ND addr:$src, -128)>;
  def : Pat<(add (loadi64 addr:$src), 128),
            (SUB64mi32_ND addr:$src, -128)>;
}

// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
let Predicates = [NoNDD] in {
  def : Pat<(add GR64:$src1, 0x0000000080000000),
            (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
  def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
            (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
}
let Predicates = [HasNDD] in {
  def : Pat<(add GR64:$src1, 0x0000000080000000),
            (SUB64ri32_ND GR64:$src1, 0xffffffff80000000)>;
  def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
            (SUB64ri32_ND GR64:$src1, 0xffffffff80000000)>;
}
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
          (SUB64mi32 addr:$dst, 0xffffffff80000000)>;
let Predicates = [HasNDD] in {
  def : Pat<(add (loadi64 addr:$src), 0x0000000080000000),
            (SUB64mi32_ND addr:$src, 0xffffffff80000000)>;
}

// Depositing a value into an 8/16-bit subreg:
def : Pat<(or (and GR64:$dst, -256),
              (i64 (zextloadi8 addr:$src))),
          (INSERT_SUBREG (i64 (COPY $dst)), (MOV8rm  i8mem:$src), sub_8bit)>;

def : Pat<(or (and GR32:$dst, -256),
              (i32 (zextloadi8 addr:$src))),
          (INSERT_SUBREG (i32 (COPY $dst)), (MOV8rm  i8mem:$src), sub_8bit)>;

def : Pat<(or (and GR64:$dst, -65536),
              (i64 (zextloadi16 addr:$src))),
          (INSERT_SUBREG (i64 (COPY $dst)), (MOV16rm  i16mem:$src), sub_16bit)>;

def : Pat<(or (and GR32:$dst, -65536),
              (i32 (zextloadi16 addr:$src))),
          (INSERT_SUBREG (i32 (COPY $dst)), (MOV16rm  i16mem:$src), sub_16bit)>;

// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If, in addition, the low 32 bits can be
// represented by a sign-extended 8-bit constant, use that.
// This can also reduce instruction size by eliminating the need for the REX
// prefix.

// AddedComplexity is needed to give priority over i64immSExt8 and i64immSExt32.
let AddedComplexity = 1 in {
  let Predicates = [NoNDD] in {
    def : Pat<(and GR64:$src, i64immZExt32:$imm),
              (SUBREG_TO_REG
                (i64 0),
                (AND32ri
                  (EXTRACT_SUBREG GR64:$src, sub_32bit),
                  (i32 (GetLo32XForm imm:$imm))),
                sub_32bit)>;
  }
  let Predicates = [HasNDD] in {
    def : Pat<(and GR64:$src, i64immZExt32:$imm),
              (SUBREG_TO_REG
                (i64 0),
                (AND32ri_ND
                  (EXTRACT_SUBREG GR64:$src, sub_32bit),
                  (i32 (GetLo32XForm imm:$imm))),
                sub_32bit)>;
  }
} // AddedComplexity = 1


// AddedComplexity is needed due to the increased complexity on the
// i64immZExt32SExt8 and i64immZExt32 patterns above. Applying this to all
// the MOVZX patterns keeps them together in the DAGISel tables.
let AddedComplexity = 1 in {
// r & (2^16-1) ==> movz
def : Pat<(and GR32:$src1, 0xffff),
          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR32:$src1, 0xff),
          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>;
// r & (2^8-1) ==> movz
def : Pat<(and GR16:$src1, 0xff),
           (EXTRACT_SUBREG (MOVZX32rr8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)),
             sub_16bit)>;

// r & (2^32-1) ==> movz
def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
          (SUBREG_TO_REG (i64 0),
                         (MOV32rr (EXTRACT_SUBREG GR64:$src, sub_32bit)),
                         sub_32bit)>;
// r & (2^16-1) ==> movz
def : Pat<(and GR64:$src, 0xffff),
          (SUBREG_TO_REG (i64 0),
                      (MOVZX32rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit))),
                      sub_32bit)>;
// r & (2^8-1) ==> movz
def : Pat<(and GR64:$src, 0xff),
          (SUBREG_TO_REG (i64 0),
                         (MOVZX32rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit))),
                         sub_32bit)>;
} // AddedComplexity = 1


// Try to use BTS/BTR/BTC for single bit operations on the upper 32 bits.

def BTRXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 0.
  return getI64Imm((uint8_t)N->getAPIntValue().countr_one(), SDLoc(N));
}]>;

def BTCBTSXForm : SDNodeXForm<imm, [{
  // Transformation function: Find the lowest 1.
  return getI64Imm((uint8_t)N->getAPIntValue().countr_zero(), SDLoc(N));
}]>;

def BTRMask64 : ImmLeaf<i64, [{
  return !isUInt<32>(Imm) && !isInt<32>(Imm) && isPowerOf2_64(~Imm);
}]>;

def BTCBTSMask64 : ImmLeaf<i64, [{
  return !isInt<32>(Imm) && isPowerOf2_64(Imm);
}]>;

// For now only do this for optsize.
let AddedComplexity = 1, Predicates=[OptForSize] in {
  def : Pat<(and GR64:$src1, BTRMask64:$mask),
            (BTR64ri8 GR64:$src1, (BTRXForm imm:$mask))>;
  def : Pat<(or GR64:$src1, BTCBTSMask64:$mask),
            (BTS64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
  def : Pat<(xor GR64:$src1, BTCBTSMask64:$mask),
            (BTC64ri8 GR64:$src1, (BTCBTSXForm imm:$mask))>;
}
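
// Worked example (illustrative): clearing a single high bit, e.g.
//   x & ~(1ULL << 40)
// would otherwise need a movabs of the mask plus an andq; under OptForSize
// the patterns above emit "btrq $40, %reg" instead. The BTRMask64 and
// BTCBTSMask64 leaves deliberately reject masks already expressible as a
// sign- or zero-extended 32-bit immediate, where a plain and/or/xor is no
// larger.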


// sext_inreg patterns
def : Pat<(sext_inreg GR32:$src, i16),
          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
def : Pat<(sext_inreg GR32:$src, i8),
          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>;

def : Pat<(sext_inreg GR16:$src, i8),
           (EXTRACT_SUBREG (MOVSX32rr8 (EXTRACT_SUBREG GR16:$src, sub_8bit)),
             sub_16bit)>;

def : Pat<(sext_inreg GR64:$src, i32),
          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
def : Pat<(sext_inreg GR64:$src, i16),
          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
def : Pat<(sext_inreg GR64:$src, i8),
          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;

// sext, sext_load, zext, zext_load
def: Pat<(i16 (sext GR8:$src)),
          (EXTRACT_SUBREG (MOVSX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(sextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVSX32rm8 addr:$src), sub_16bit)>;
def: Pat<(i16 (zext GR8:$src)),
          (EXTRACT_SUBREG (MOVZX32rr8 GR8:$src), sub_16bit)>;
def: Pat<(zextloadi16i8 addr:$src),
          (EXTRACT_SUBREG (MOVZX32rm8 addr:$src), sub_16bit)>;

// trunc patterns
def : Pat<(i16 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
                          sub_8bit)>,
      Requires<[Not64BitMode]>;
def : Pat<(i32 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
def : Pat<(i16 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
def : Pat<(i8 (trunc GR64:$src)),
          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
def : Pat<(i8 (trunc GR32:$src)),
          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
      Requires<[In64BitMode]>;
def : Pat<(i8 (trunc GR16:$src)),
          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
      Requires<[In64BitMode]>;

def immff00_ffff  : ImmLeaf<i32, [{
  return Imm >= 0xff00 && Imm <= 0xffff;
}]>;

// h-register tricks
def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su (i32 (anyext GR16:$src)), (i8 8)))),
          (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
          (EXTRACT_SUBREG GR32:$src, sub_8bit_hi)>,
      Requires<[Not64BitMode]>;
def : Pat<(srl GR16:$src, (i8 8)),
          (EXTRACT_SUBREG
            (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_16bit)>;
def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>;
def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;
def : Pat<(srl (and_su GR32:$src, immff00_ffff), (i8 8)),
          (MOVZX32rr8_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;

// h-register tricks.
// For now, be conservative on x86-64 and use an h-register extract only if the
// value is immediately zero-extended or stored, which are somewhat common
// cases. This uses a bunch of code to prevent a register requiring a REX prefix
// from being allocated in the same instruction as the h register, as there's
// currently no way to describe this requirement to the register allocator.
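//
// Illustrative case (informal): extracting the second byte,
//   unsigned f(unsigned x) { return (x >> 8) & 0xff; }
// can become a single "movzbl %ah, %eax". The _NOREX variants are used
// because AH/BH/CH/DH are not encodable in any instruction that carries a
// REX prefix, so the other operands must stay out of R8-R15 and SPL-DIL.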

// h-register extract and zero-extend.
def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR64:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;
def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
          (SUBREG_TO_REG
            (i64 0),
            (MOVZX32rr8_NOREX
              (EXTRACT_SUBREG GR16:$src, sub_8bit_hi)),
            sub_32bit)>;

// h-register extract and store.
def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR64:$src, sub_8bit_hi))>;
def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;
def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
          (MOV8mr_NOREX
            addr:$dst,
            (EXTRACT_SUBREG GR16:$src, sub_8bit_hi))>,
      Requires<[In64BitMode]>;

// Special pattern to catch the last step of __builtin_parity handling. Our
// goal is to use an xor of an h-register with the corresponding l-register.
// The above patterns would handle this on non 64-bit targets, but for 64-bit
// we need to be more careful. We're using a NOREX instruction here in case
// register allocation fails to keep the two registers together. So we need to
// make sure we can't accidentally mix R8-R15 with an h-register.
def : Pat<(X86xor_flag (i8 (trunc GR32:$src)),
                       (i8 (trunc (srl_su GR32:$src, (i8 8))))),
          (XOR8rr_NOREX (EXTRACT_SUBREG GR32:$src, sub_8bit),
                        (EXTRACT_SUBREG GR32:$src, sub_8bit_hi))>;

// (shl x, 1) ==> (add x, x)
// Note that if x is undef (immediate or otherwise), we could theoretically
// end up with the two uses of x getting different values, producing a result
// where the least significant bit is not 0. However, the probability of this
// happening is considered low enough that this is officially not a
// "real problem".
let Predicates = [NoNDD] in {
  def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr  GR8 :$src1, GR8 :$src1)>;
  def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr GR16:$src1, GR16:$src1)>;
  def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr GR32:$src1, GR32:$src1)>;
  def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr GR64:$src1, GR64:$src1)>;
}
let Predicates = [HasNDD] in {
  def : Pat<(shl GR8 :$src1, (i8 1)), (ADD8rr_ND  GR8 :$src1, GR8 :$src1)>;
  def : Pat<(shl GR16:$src1, (i8 1)), (ADD16rr_ND GR16:$src1, GR16:$src1)>;
  def : Pat<(shl GR32:$src1, (i8 1)), (ADD32rr_ND GR32:$src1, GR32:$src1)>;
  def : Pat<(shl GR64:$src1, (i8 1)), (ADD64rr_ND GR64:$src1, GR64:$src1)>;
}

// Shift amount is implicitly masked.
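// For example (informal): in
//   unsigned f(unsigned x, unsigned n) { return x >> (n & 31); }
// the explicit "& 31" is redundant because the hardware already masks the
// shift count in CL to 5 bits (6 bits for 64-bit operands), so the patterns
// below drop the AND and emit a bare "shrl %cl, ..." style shift.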
multiclass MaskedShiftAmountPats<SDNode frag> {
  // (shift x (and y, 31)) ==> (shift x, y)
  // (shift x (and y, 63)) ==> (shift x, y)
  let Predicates = [NoNDD] in {
    def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "8rCL") GR8:$src1)>;
    def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "16rCL") GR16:$src1)>;
    def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "32rCL") GR32:$src1)>;
    def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
              (!cast<Instruction>(NAME # "64rCL") GR64:$src1)>;
  }
  let Predicates = [HasNDD] in {
    def : Pat<(frag GR8:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "8rCL_ND") GR8:$src1)>;
    def : Pat<(frag GR16:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "16rCL_ND") GR16:$src1)>;
    def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "32rCL_ND") GR32:$src1)>;
    def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
              (!cast<Instruction>(NAME # "64rCL_ND") GR64:$src1)>;
  }

  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(NAME # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(NAME # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(NAME # "32mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(NAME # "64mCL") addr:$dst)>;

  let Predicates = [HasNDD] in {
    def : Pat<(frag (loadi8 addr:$src), (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "8mCL_ND") addr:$src)>;
    def : Pat<(frag (loadi16 addr:$src), (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "16mCL_ND") addr:$src)>;
    def : Pat<(frag (loadi32 addr:$src), (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "32mCL_ND") addr:$src)>;
    def : Pat<(frag (loadi64 addr:$src), (shiftMask64 CL)),
              (!cast<Instruction>(NAME # "64mCL_ND") addr:$src)>;
  }
}

defm SHL : MaskedShiftAmountPats<shl>;
defm SHR : MaskedShiftAmountPats<srl>;
defm SAR : MaskedShiftAmountPats<sra>;

// ROL/ROR instructions allow a stronger mask optimization than shift for 8- and
// 16-bit. We can remove a mask of any (bitwidth - 1) on the rotation amount
// because over-rotating produces the same result. This is noted in the Intel
// docs with: "tempCOUNT <- (COUNT & COUNTMASK) MOD SIZE". Masking the rotation
// amount could affect EFLAGS results, but that does not matter because we are
// not tracking flags for these nodes.
multiclass MaskedRotateAmountPats<SDNode frag> {
  // (rot x (and y, BitWidth - 1)) ==> (rot x, y)
  let Predicates = [NoNDD] in {
    def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
              (!cast<Instruction>(NAME # "8rCL") GR8:$src1)>;
    def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
              (!cast<Instruction>(NAME # "16rCL") GR16:$src1)>;
    def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "32rCL") GR32:$src1)>;
    def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
              (!cast<Instruction>(NAME # "64rCL") GR64:$src1)>;
  }
  let Predicates = [HasNDD] in {
    def : Pat<(frag GR8:$src1, (shiftMask8 CL)),
              (!cast<Instruction>(NAME # "8rCL_ND") GR8:$src1)>;
    def : Pat<(frag GR16:$src1, (shiftMask16 CL)),
              (!cast<Instruction>(NAME # "16rCL_ND") GR16:$src1)>;
    def : Pat<(frag GR32:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "32rCL_ND") GR32:$src1)>;
    def : Pat<(frag GR64:$src1, (shiftMask64 CL)),
              (!cast<Instruction>(NAME # "64rCL_ND") GR64:$src1)>;
  }

  def : Pat<(store (frag (loadi8 addr:$dst), (shiftMask8 CL)), addr:$dst),
            (!cast<Instruction>(NAME # "8mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi16 addr:$dst), (shiftMask16 CL)), addr:$dst),
            (!cast<Instruction>(NAME # "16mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi32 addr:$dst), (shiftMask32 CL)), addr:$dst),
            (!cast<Instruction>(NAME # "32mCL") addr:$dst)>;
  def : Pat<(store (frag (loadi64 addr:$dst), (shiftMask64 CL)), addr:$dst),
            (!cast<Instruction>(NAME # "64mCL") addr:$dst)>;

  let Predicates = [HasNDD] in {
    def : Pat<(frag (loadi8 addr:$src), (shiftMask8 CL)),
              (!cast<Instruction>(NAME # "8mCL_ND") addr:$src)>;
    def : Pat<(frag (loadi16 addr:$src), (shiftMask16 CL)),
              (!cast<Instruction>(NAME # "16mCL_ND") addr:$src)>;
    def : Pat<(frag (loadi32 addr:$src), (shiftMask32 CL)),
              (!cast<Instruction>(NAME # "32mCL_ND") addr:$src)>;
    def : Pat<(frag (loadi64 addr:$src), (shiftMask64 CL)),
              (!cast<Instruction>(NAME # "64mCL_ND") addr:$src)>;
  }
}

defm ROL : MaskedRotateAmountPats<rotl>;
defm ROR : MaskedRotateAmountPats<rotr>;

multiclass MaskedShlrdAmountPats<string suffix, Predicate p> {
  let Predicates = [p] in {
    // Double "funnel" shift amount is implicitly masked.
    // (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y) (NOTE: modulo32)
    def : Pat<(X86fshl GR16:$src1, GR16:$src2, (shiftMask32 CL)),
              (!cast<Instruction>(SHLD16rrCL#suffix) GR16:$src1, GR16:$src2)>;
    def : Pat<(X86fshr GR16:$src2, GR16:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(SHRD16rrCL#suffix) GR16:$src1, GR16:$src2)>;

    // (fshl/fshr x (and y, 31)) ==> (fshl/fshr x, y)
    def : Pat<(fshl GR32:$src1, GR32:$src2, (shiftMask32 CL)),
              (!cast<Instruction>(SHLD32rrCL#suffix) GR32:$src1, GR32:$src2)>;
    def : Pat<(fshr GR32:$src2, GR32:$src1, (shiftMask32 CL)),
              (!cast<Instruction>(SHRD32rrCL#suffix) GR32:$src1, GR32:$src2)>;

    // (fshl/fshr x (and y, 63)) ==> (fshl/fshr x, y)
    def : Pat<(fshl GR64:$src1, GR64:$src2, (shiftMask64 CL)),
              (!cast<Instruction>(SHLD64rrCL#suffix) GR64:$src1, GR64:$src2)>;
    def : Pat<(fshr GR64:$src2, GR64:$src1, (shiftMask64 CL)),
              (!cast<Instruction>(SHRD64rrCL#suffix) GR64:$src1, GR64:$src2)>;
  }
}

defm : MaskedShlrdAmountPats<"", NoNDD>;
defm : MaskedShlrdAmountPats<"_ND", HasNDD>;

// Use BTR/BTS/BTC for clearing/setting/toggling a bit in a variable location.
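// Hedged example: for
//   unsigned set_bit(unsigned x, unsigned n) { return x | (1u << n); }
// the shifted-one form is matched below and becomes "btsl %esi, %eax" style
// code (BTR for the and-with-rotated -2 form, BTC for the xor form), instead
// of materializing the mask with a separate mov/shl pair.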
multiclass OneBitPats<RegisterClass rc, ValueType vt, Instruction btr,
                      Instruction bts, Instruction btc, PatFrag mask> {
  def : Pat<(and rc:$src1, (rotl -2, GR8:$src2)),
            (btr rc:$src1,
                 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or rc:$src1, (shl 1, GR8:$src2)),
            (bts rc:$src1,
                 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor rc:$src1, (shl 1, GR8:$src2)),
            (btc rc:$src1,
                 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;

  // Similar to above, but removing unneeded masking of the shift amount.
  def : Pat<(and rc:$src1, (rotl -2, (mask GR8:$src2))),
            (btr rc:$src1,
                 (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(or rc:$src1, (shl 1, (mask GR8:$src2))),
            (bts rc:$src1,
                (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
  def : Pat<(xor rc:$src1, (shl 1, (mask GR8:$src2))),
            (btc rc:$src1,
                (INSERT_SUBREG (vt (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>;
}

defm : OneBitPats<GR16, i16, BTR16rr, BTS16rr, BTC16rr, shiftMask16>;
defm : OneBitPats<GR32, i32, BTR32rr, BTS32rr, BTC32rr, shiftMask32>;
defm : OneBitPats<GR64, i64, BTR64rr, BTS64rr, BTC64rr, shiftMask64>;

//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns
//===----------------------------------------------------------------------===//

multiclass EFLAGSDefiningPats<string suffix, Predicate p> {
  let Predicates = [p] in {
    // add reg, reg
    def : Pat<(add GR8 :$src1, GR8 :$src2), (!cast<Instruction>(ADD8rr#suffix) GR8 :$src1, GR8 :$src2)>;
    def : Pat<(add GR16:$src1, GR16:$src2), (!cast<Instruction>(ADD16rr#suffix) GR16:$src1, GR16:$src2)>;
    def : Pat<(add GR32:$src1, GR32:$src2), (!cast<Instruction>(ADD32rr#suffix) GR32:$src1, GR32:$src2)>;
    def : Pat<(add GR64:$src1, GR64:$src2), (!cast<Instruction>(ADD64rr#suffix) GR64:$src1, GR64:$src2)>;

    // add reg, mem
    def : Pat<(add GR8:$src1, (loadi8 addr:$src2)),
              (!cast<Instruction>(ADD8rm#suffix) GR8:$src1, addr:$src2)>;
    def : Pat<(add GR16:$src1, (loadi16 addr:$src2)),
              (!cast<Instruction>(ADD16rm#suffix) GR16:$src1, addr:$src2)>;
    def : Pat<(add GR32:$src1, (loadi32 addr:$src2)),
              (!cast<Instruction>(ADD32rm#suffix) GR32:$src1, addr:$src2)>;
    def : Pat<(add GR64:$src1, (loadi64 addr:$src2)),
              (!cast<Instruction>(ADD64rm#suffix) GR64:$src1, addr:$src2)>;

    // add reg, imm
    def : Pat<(add GR8 :$src1, imm:$src2), (!cast<Instruction>(ADD8ri#suffix) GR8:$src1 , imm:$src2)>;
    def : Pat<(add GR16:$src1, imm:$src2), (!cast<Instruction>(ADD16ri#suffix) GR16:$src1, imm:$src2)>;
    def : Pat<(add GR32:$src1, imm:$src2), (!cast<Instruction>(ADD32ri#suffix) GR32:$src1, imm:$src2)>;
    def : Pat<(add GR64:$src1, i64immSExt32:$src2), (!cast<Instruction>(ADD64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;

    // sub reg, reg
    def : Pat<(sub GR8 :$src1, GR8 :$src2), (!cast<Instruction>(SUB8rr#suffix)  GR8 :$src1, GR8 :$src2)>;
    def : Pat<(sub GR16:$src1, GR16:$src2), (!cast<Instruction>(SUB16rr#suffix) GR16:$src1, GR16:$src2)>;
    def : Pat<(sub GR32:$src1, GR32:$src2), (!cast<Instruction>(SUB32rr#suffix) GR32:$src1, GR32:$src2)>;
    def : Pat<(sub GR64:$src1, GR64:$src2), (!cast<Instruction>(SUB64rr#suffix) GR64:$src1, GR64:$src2)>;

    // sub reg, mem
    def : Pat<(sub GR8:$src1, (loadi8 addr:$src2)),
              (!cast<Instruction>(SUB8rm#suffix) GR8:$src1, addr:$src2)>;
    def : Pat<(sub GR16:$src1, (loadi16 addr:$src2)),
              (!cast<Instruction>(SUB16rm#suffix) GR16:$src1, addr:$src2)>;
    def : Pat<(sub GR32:$src1, (loadi32 addr:$src2)),
              (!cast<Instruction>(SUB32rm#suffix) GR32:$src1, addr:$src2)>;
    def : Pat<(sub GR64:$src1, (loadi64 addr:$src2)),
              (!cast<Instruction>(SUB64rm#suffix) GR64:$src1, addr:$src2)>;

    // sub reg, imm
    def : Pat<(sub GR8:$src1, imm:$src2),
              (!cast<Instruction>(SUB8ri#suffix) GR8:$src1, imm:$src2)>;
    def : Pat<(sub GR16:$src1, imm:$src2),
              (!cast<Instruction>(SUB16ri#suffix) GR16:$src1, imm:$src2)>;
    def : Pat<(sub GR32:$src1, imm:$src2),
              (!cast<Instruction>(SUB32ri#suffix) GR32:$src1, imm:$src2)>;
    def : Pat<(sub GR64:$src1, i64immSExt32:$src2),
              (!cast<Instruction>(SUB64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;

    // sub 0, reg
    def : Pat<(X86sub_flag 0, GR8 :$src), (!cast<Instruction>(NEG8r#suffix)  GR8 :$src)>;
    def : Pat<(X86sub_flag 0, GR16:$src), (!cast<Instruction>(NEG16r#suffix) GR16:$src)>;
    def : Pat<(X86sub_flag 0, GR32:$src), (!cast<Instruction>(NEG32r#suffix) GR32:$src)>;
    def : Pat<(X86sub_flag 0, GR64:$src), (!cast<Instruction>(NEG64r#suffix) GR64:$src)>;

    // mul reg, reg
    def : Pat<(mul GR16:$src1, GR16:$src2),
              (!cast<Instruction>(IMUL16rr#suffix) GR16:$src1, GR16:$src2)>;
    def : Pat<(mul GR32:$src1, GR32:$src2),
              (!cast<Instruction>(IMUL32rr#suffix) GR32:$src1, GR32:$src2)>;
    def : Pat<(mul GR64:$src1, GR64:$src2),
              (!cast<Instruction>(IMUL64rr#suffix) GR64:$src1, GR64:$src2)>;

    // mul reg, mem
    def : Pat<(mul GR16:$src1, (loadi16 addr:$src2)),
              (!cast<Instruction>(IMUL16rm#suffix) GR16:$src1, addr:$src2)>;
    def : Pat<(mul GR32:$src1, (loadi32 addr:$src2)),
              (!cast<Instruction>(IMUL32rm#suffix) GR32:$src1, addr:$src2)>;
    def : Pat<(mul GR64:$src1, (loadi64 addr:$src2)),
              (!cast<Instruction>(IMUL64rm#suffix) GR64:$src1, addr:$src2)>;

    // or reg/reg.
    def : Pat<(or GR8 :$src1, GR8 :$src2), (!cast<Instruction>(OR8rr#suffix)  GR8 :$src1, GR8 :$src2)>;
    def : Pat<(or GR16:$src1, GR16:$src2), (!cast<Instruction>(OR16rr#suffix) GR16:$src1, GR16:$src2)>;
    def : Pat<(or GR32:$src1, GR32:$src2), (!cast<Instruction>(OR32rr#suffix) GR32:$src1, GR32:$src2)>;
    def : Pat<(or GR64:$src1, GR64:$src2), (!cast<Instruction>(OR64rr#suffix) GR64:$src1, GR64:$src2)>;

    // or reg/mem
    def : Pat<(or GR8:$src1, (loadi8 addr:$src2)),
              (!cast<Instruction>(OR8rm#suffix) GR8:$src1, addr:$src2)>;
    def : Pat<(or GR16:$src1, (loadi16 addr:$src2)),
              (!cast<Instruction>(OR16rm#suffix) GR16:$src1, addr:$src2)>;
    def : Pat<(or GR32:$src1, (loadi32 addr:$src2)),
              (!cast<Instruction>(OR32rm#suffix) GR32:$src1, addr:$src2)>;
    def : Pat<(or GR64:$src1, (loadi64 addr:$src2)),
              (!cast<Instruction>(OR64rm#suffix) GR64:$src1, addr:$src2)>;

    // or reg/imm
    def : Pat<(or GR8:$src1 , imm:$src2), (!cast<Instruction>(OR8ri#suffix)  GR8 :$src1, imm:$src2)>;
    def : Pat<(or GR16:$src1, imm:$src2), (!cast<Instruction>(OR16ri#suffix) GR16:$src1, imm:$src2)>;
    def : Pat<(or GR32:$src1, imm:$src2), (!cast<Instruction>(OR32ri#suffix) GR32:$src1, imm:$src2)>;
    def : Pat<(or GR64:$src1, i64immSExt32:$src2),
              (!cast<Instruction>(OR64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;

    // xor reg/reg
    def : Pat<(xor GR8 :$src1, GR8 :$src2), (!cast<Instruction>(XOR8rr#suffix)  GR8 :$src1, GR8 :$src2)>;
    def : Pat<(xor GR16:$src1, GR16:$src2), (!cast<Instruction>(XOR16rr#suffix) GR16:$src1, GR16:$src2)>;
    def : Pat<(xor GR32:$src1, GR32:$src2), (!cast<Instruction>(XOR32rr#suffix) GR32:$src1, GR32:$src2)>;
    def : Pat<(xor GR64:$src1, GR64:$src2), (!cast<Instruction>(XOR64rr#suffix) GR64:$src1, GR64:$src2)>;

    // xor reg/mem
    def : Pat<(xor GR8:$src1, (loadi8 addr:$src2)),
              (!cast<Instruction>(XOR8rm#suffix) GR8:$src1, addr:$src2)>;
    def : Pat<(xor GR16:$src1, (loadi16 addr:$src2)),
              (!cast<Instruction>(XOR16rm#suffix) GR16:$src1, addr:$src2)>;
    def : Pat<(xor GR32:$src1, (loadi32 addr:$src2)),
              (!cast<Instruction>(XOR32rm#suffix) GR32:$src1, addr:$src2)>;
    def : Pat<(xor GR64:$src1, (loadi64 addr:$src2)),
              (!cast<Instruction>(XOR64rm#suffix) GR64:$src1, addr:$src2)>;

    // xor reg/imm
    def : Pat<(xor GR8:$src1, imm:$src2),
              (!cast<Instruction>(XOR8ri#suffix) GR8:$src1, imm:$src2)>;
    def : Pat<(xor GR16:$src1, imm:$src2),
              (!cast<Instruction>(XOR16ri#suffix) GR16:$src1, imm:$src2)>;
    def : Pat<(xor GR32:$src1, imm:$src2),
              (!cast<Instruction>(XOR32ri#suffix) GR32:$src1, imm:$src2)>;
    def : Pat<(xor GR64:$src1, i64immSExt32:$src2),
              (!cast<Instruction>(XOR64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;

    // and reg/reg
    def : Pat<(and GR8 :$src1, GR8 :$src2), (!cast<Instruction>(AND8rr#suffix)  GR8 :$src1, GR8 :$src2)>;
    def : Pat<(and GR16:$src1, GR16:$src2), (!cast<Instruction>(AND16rr#suffix) GR16:$src1, GR16:$src2)>;
    def : Pat<(and GR32:$src1, GR32:$src2), (!cast<Instruction>(AND32rr#suffix) GR32:$src1, GR32:$src2)>;
    def : Pat<(and GR64:$src1, GR64:$src2), (!cast<Instruction>(AND64rr#suffix) GR64:$src1, GR64:$src2)>;

    // and reg/mem
    def : Pat<(and GR8:$src1, (loadi8 addr:$src2)),
              (!cast<Instruction>(AND8rm#suffix) GR8:$src1, addr:$src2)>;
    def : Pat<(and GR16:$src1, (loadi16 addr:$src2)),
              (!cast<Instruction>(AND16rm#suffix) GR16:$src1, addr:$src2)>;
    def : Pat<(and GR32:$src1, (loadi32 addr:$src2)),
              (!cast<Instruction>(AND32rm#suffix) GR32:$src1, addr:$src2)>;
    def : Pat<(and GR64:$src1, (loadi64 addr:$src2)),
              (!cast<Instruction>(AND64rm#suffix) GR64:$src1, addr:$src2)>;

    // and reg/imm
    def : Pat<(and GR8:$src1, imm:$src2),
              (!cast<Instruction>(AND8ri#suffix) GR8:$src1, imm:$src2)>;
    def : Pat<(and GR16:$src1, imm:$src2),
              (!cast<Instruction>(AND16ri#suffix) GR16:$src1, imm:$src2)>;
    def : Pat<(and GR32:$src1, imm:$src2),
              (!cast<Instruction>(AND32ri#suffix) GR32:$src1, imm:$src2)>;
    def : Pat<(and GR64:$src1, i64immSExt32:$src2),
              (!cast<Instruction>(AND64ri32#suffix) GR64:$src1, i64immSExt32:$src2)>;
  }

  // Increment/Decrement reg.
  // Do not use INC/DEC if it is slow on the target.
  let Predicates = [UseIncDec, p] in {
    def : Pat<(add GR8:$src, 1),   (!cast<Instruction>(INC8r#suffix) GR8:$src)>;
    def : Pat<(add GR16:$src, 1),  (!cast<Instruction>(INC16r#suffix) GR16:$src)>;
    def : Pat<(add GR32:$src, 1),  (!cast<Instruction>(INC32r#suffix) GR32:$src)>;
    def : Pat<(add GR64:$src, 1),  (!cast<Instruction>(INC64r#suffix) GR64:$src)>;
    def : Pat<(add GR8:$src, -1),  (!cast<Instruction>(DEC8r#suffix) GR8:$src)>;
    def : Pat<(add GR16:$src, -1), (!cast<Instruction>(DEC16r#suffix) GR16:$src)>;
    def : Pat<(add GR32:$src, -1), (!cast<Instruction>(DEC32r#suffix) GR32:$src)>;
    def : Pat<(add GR64:$src, -1), (!cast<Instruction>(DEC64r#suffix) GR64:$src)>;

    def : Pat<(X86add_flag_nocf GR8:$src, -1),  (!cast<Instruction>(DEC8r#suffix) GR8:$src)>;
    def : Pat<(X86add_flag_nocf GR16:$src, -1), (!cast<Instruction>(DEC16r#suffix) GR16:$src)>;
    def : Pat<(X86add_flag_nocf GR32:$src, -1), (!cast<Instruction>(DEC32r#suffix) GR32:$src)>;
    def : Pat<(X86add_flag_nocf GR64:$src, -1), (!cast<Instruction>(DEC64r#suffix) GR64:$src)>;
    def : Pat<(X86sub_flag_nocf GR8:$src, -1),  (!cast<Instruction>(INC8r#suffix) GR8:$src)>;
    def : Pat<(X86sub_flag_nocf GR16:$src, -1), (!cast<Instruction>(INC16r#suffix) GR16:$src)>;
    def : Pat<(X86sub_flag_nocf GR32:$src, -1), (!cast<Instruction>(INC32r#suffix) GR32:$src)>;
    def : Pat<(X86sub_flag_nocf GR64:$src, -1), (!cast<Instruction>(INC64r#suffix) GR64:$src)>;

    def : Pat<(or_is_add GR8:$src, 1),   (!cast<Instruction>(INC8r#suffix) GR8:$src)>;
    def : Pat<(or_is_add GR16:$src, 1),  (!cast<Instruction>(INC16r#suffix) GR16:$src)>;
    def : Pat<(or_is_add GR32:$src, 1),  (!cast<Instruction>(INC32r#suffix) GR32:$src)>;
    def : Pat<(or_is_add GR64:$src, 1),  (!cast<Instruction>(INC64r#suffix) GR64:$src)>;
  }
}

defm : EFLAGSDefiningPats<"", NoNDD>;
defm : EFLAGSDefiningPats<"_ND", HasNDD>;

let Predicates = [HasZU] in {
  // zext (mul reg/mem, imm) -> imulzu
  def : Pat<(i32 (zext (i16 (mul GR16:$src1, imm:$src2)))),
            (SUBREG_TO_REG (i32 0), (IMULZU16rri GR16:$src1, imm:$src2), sub_16bit)>;
  def : Pat<(i32 (zext (i16 (mul (loadi16 addr:$src1), imm:$src2)))),
            (SUBREG_TO_REG (i32 0), (IMULZU16rmi addr:$src1, imm:$src2), sub_16bit)>;
  def : Pat<(i64 (zext (i16 (mul GR16:$src1, imm:$src2)))),
            (SUBREG_TO_REG (i64 0), (IMULZU16rri GR16:$src1, imm:$src2), sub_16bit)>;
  def : Pat<(i64 (zext (i16 (mul (loadi16 addr:$src1), imm:$src2)))),
            (SUBREG_TO_REG (i64 0), (IMULZU16rmi addr:$src1, imm:$src2), sub_16bit)>;
}

// mul reg, imm
def : Pat<(mul GR16:$src1, imm:$src2),
          (IMUL16rri GR16:$src1, imm:$src2)>;
def : Pat<(mul GR32:$src1, imm:$src2),
          (IMUL32rri GR32:$src1, imm:$src2)>;
def : Pat<(mul GR64:$src1, i64immSExt32:$src2),
          (IMUL64rri32 GR64:$src1, i64immSExt32:$src2)>;

// reg = mul mem, imm
def : Pat<(mul (loadi16 addr:$src1), imm:$src2),
          (IMUL16rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi32 addr:$src1), imm:$src2),
          (IMUL32rmi addr:$src1, imm:$src2)>;
def : Pat<(mul (loadi64 addr:$src1), i64immSExt32:$src2),
          (IMUL64rmi32 addr:$src1, i64immSExt32:$src2)>;

// Bit scan instruction patterns to match explicit zero-undef behavior.
def : Pat<(cttz_zero_undef GR16:$src), (BSF16rr (i16 (IMPLICIT_DEF)), GR16:$src)>;
def : Pat<(cttz_zero_undef GR32:$src), (BSF32rr (i32 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(cttz_zero_undef GR64:$src), (BSF64rr (i64 (IMPLICIT_DEF)), GR64:$src)>;
def : Pat<(cttz_zero_undef (loadi16 addr:$src)), (BSF16rm (i16 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(cttz_zero_undef (loadi32 addr:$src)), (BSF32rm (i32 (IMPLICIT_DEF)), addr:$src)>;
def : Pat<(cttz_zero_undef (loadi64 addr:$src)), (BSF64rm (i64 (IMPLICIT_DEF)), addr:$src)>;

// When HasMOVBE is enabled it is possible to get a non-legalized
// register-register 16-bit bswap. This maps it to a ROL instruction.
let Predicates = [HasMOVBE] in {
 def : Pat<(bswap GR16:$src), (ROL16ri GR16:$src, (i8 8))>;
}

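// Sanity check of that mapping (informal): a 16-bit byte swap just exchanges
// the two bytes, and rotating a 16-bit value by 8 does exactly that, e.g.
// bswap16(0xAABB) == rol16(0xAABB, 8) == 0xBBAA, so ROL16ri with a count of 8
// is a faithful expansion.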