//===-- X86EncodingOptimization.cpp - X86 Encoding optimization -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the X86 encoding optimization.
//
//===----------------------------------------------------------------------===//

#include "X86EncodingOptimization.h"
#include "X86BaseInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

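// Returns true when exchanging the two register operands would pay off: the
// operand at OpIdx1 is a legacy register while the operand at OpIdx2 is an
// x86-64 extended register (e.g. R8-R15, XMM8-XMM15), so the exchange can
// move the extended register into a field the shorter prefix can still
// encode.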
static bool shouldExchange(const MCInst &MI, unsigned OpIdx1, unsigned OpIdx2) {
  return !X86II::isX86_64ExtendedReg(MI.getOperand(OpIdx1).getReg()) &&
         X86II::isX86_64ExtendedReg(MI.getOperand(OpIdx2).getReg());
}

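// The 2-byte VEX prefix (C5) can encode VEX.R, VEX.vvvv, VEX.L and VEX.pp,
// but VEX.X, VEX.B, VEX.W and opcode maps other than 0F require the 3-byte
// form (C4). An extended register in ModRM.r/m needs VEX.B, so when only
// that register is extended we can shrink the encoding by one byte by
// moving it into a field the 2-byte prefix covers: either commute the
// source operands (into VEX.vvvv) or switch to the reversed-encoding _REV
// opcode (into ModRM.reg, i.e. VEX.R).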
bool X86::optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc) {
  unsigned OpIdx1, OpIdx2;
  unsigned NewOpc;
  unsigned Opcode = MI.getOpcode();
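// FROM_TO records the replacement opcode and the two register operands to
// compare; TO_REV maps an instruction to its ModRM-reversed twin
// (FROM##_REV), which swaps which operand goes in ModRM.reg vs ModRM.r/m.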
#define FROM_TO(FROM, TO, IDX1, IDX2)                                          \
  case X86::FROM:                                                              \
    NewOpc = X86::TO;                                                          \
    OpIdx1 = IDX1;                                                             \
    OpIdx2 = IDX2;                                                             \
    break;
#define TO_REV(FROM) FROM_TO(FROM, FROM##_REV, 0, 1)
  switch (MI.getOpcode()) {
  default: {
    // If the instruction is a commutable arithmetic instruction, we might be
    // able to commute the operands to get a 2-byte VEX prefix.
    uint64_t TSFlags = Desc.TSFlags;
    if (!Desc.isCommutable() || (TSFlags & X86II::EncodingMask) != X86II::VEX ||
        (TSFlags & X86II::OpMapMask) != X86II::TB ||
        (TSFlags & X86II::FormMask) != X86II::MRMSrcReg ||
        (TSFlags & X86II::REX_W) || !(TSFlags & X86II::VEX_4V) ||
        MI.getNumOperands() != 3)
      return false;
    // These two are not truly commutable.
    if (Opcode == X86::VMOVHLPSrr || Opcode == X86::VUNPCKHPDrr)
      return false;
    OpIdx1 = 1;
    OpIdx2 = 2;
    if (!shouldExchange(MI, OpIdx1, OpIdx2))
      return false;
    std::swap(MI.getOperand(OpIdx1), MI.getOperand(OpIdx2));
    return true;
  }
    // Commute operands to get a smaller encoding by using VEX.R instead of
    // VEX.B if one of the registers is extended, but the other isn't.
    FROM_TO(VMOVZPQILo2PQIrr, VMOVPQI2QIrr, 0, 1)
    TO_REV(VMOVAPDrr)
    TO_REV(VMOVAPDYrr)
    TO_REV(VMOVAPSrr)
    TO_REV(VMOVAPSYrr)
    TO_REV(VMOVDQArr)
    TO_REV(VMOVDQAYrr)
    TO_REV(VMOVDQUrr)
    TO_REV(VMOVDQUYrr)
    TO_REV(VMOVUPDrr)
    TO_REV(VMOVUPDYrr)
    TO_REV(VMOVUPSrr)
    TO_REV(VMOVUPSYrr)
#undef TO_REV
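// VMOVSDrr/VMOVSSrr carry three register operands; the middle one is encoded
// in VEX.vvvv, so the ModRM pair to compare is operands 0 and 2.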
#define TO_REV(FROM) FROM_TO(FROM, FROM##_REV, 0, 2)
    TO_REV(VMOVSDrr)
    TO_REV(VMOVSSrr)
#undef TO_REV
#undef FROM_TO
  }
  if (!shouldExchange(MI, OpIdx1, OpIdx2))
    return false;
  MI.setOpcode(NewOpc);
  return true;
}

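// Rewrite a shift/rotate by the immediate 1 to the dedicated by-one form
// (opcode D0/D1 rather than C0/C1), which has no immediate byte and is
// therefore one byte shorter.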
// NOTE: We may write this as an InstAlias if it's only used by AsmParser. See
// validateTargetOperandClass.
bool X86::optimizeShiftRotateWithImmediateOne(MCInst &MI) {
  unsigned NewOpc;
#define TO_IMM1(FROM)                                                          \
  case X86::FROM##i:                                                           \
    NewOpc = X86::FROM##1;                                                     \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
    TO_IMM1(RCR8r)
    TO_IMM1(RCR16r)
    TO_IMM1(RCR32r)
    TO_IMM1(RCR64r)
    TO_IMM1(RCL8r)
    TO_IMM1(RCL16r)
    TO_IMM1(RCL32r)
    TO_IMM1(RCL64r)
    TO_IMM1(ROR8r)
    TO_IMM1(ROR16r)
    TO_IMM1(ROR32r)
    TO_IMM1(ROR64r)
    TO_IMM1(ROL8r)
    TO_IMM1(ROL16r)
    TO_IMM1(ROL32r)
    TO_IMM1(ROL64r)
    TO_IMM1(SAR8r)
    TO_IMM1(SAR16r)
    TO_IMM1(SAR32r)
    TO_IMM1(SAR64r)
    TO_IMM1(SHR8r)
    TO_IMM1(SHR16r)
    TO_IMM1(SHR32r)
    TO_IMM1(SHR64r)
    TO_IMM1(SHL8r)
    TO_IMM1(SHL16r)
    TO_IMM1(SHL32r)
    TO_IMM1(SHL64r)
    TO_IMM1(RCR8m)
    TO_IMM1(RCR16m)
    TO_IMM1(RCR32m)
    TO_IMM1(RCR64m)
    TO_IMM1(RCL8m)
    TO_IMM1(RCL16m)
    TO_IMM1(RCL32m)
    TO_IMM1(RCL64m)
    TO_IMM1(ROR8m)
    TO_IMM1(ROR16m)
    TO_IMM1(ROR32m)
    TO_IMM1(ROR64m)
    TO_IMM1(ROL8m)
    TO_IMM1(ROL16m)
    TO_IMM1(ROL32m)
    TO_IMM1(ROL64m)
    TO_IMM1(SAR8m)
    TO_IMM1(SAR16m)
    TO_IMM1(SAR32m)
    TO_IMM1(SAR64m)
    TO_IMM1(SHR8m)
    TO_IMM1(SHR16m)
    TO_IMM1(SHR32m)
    TO_IMM1(SHR64m)
    TO_IMM1(SHL8m)
    TO_IMM1(SHL16m)
    TO_IMM1(SHL32m)
    TO_IMM1(SHL64m)
  }
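  // The by-one forms take no immediate operand, so only rewrite when the
  // rotate/shift count is exactly 1, then drop the immediate.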
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  if (!LastOp.isImm() || LastOp.getImm() != 1)
    return false;
  MI.setOpcode(NewOpc);
  MI.erase(&LastOp);
  return true;
}