
[llvm-exegesis] Improve Register Setup.

Summary:
Added a function to set a register to a particular value, plus tests.
Added an EFLAGS test and switched to the new setRegTo instead of setRegToConstant.
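
As an informal sketch (not part of the patch; it assumes the llvm-exegesis internal Target.h header, a registered X86 exegesis target, and an MCSubtargetInfo created by the caller — the helper name and triple below are illustrative), the reworked interface is queried like this:

#include "Target.h" // exegesis::ExegesisTarget (llvm-exegesis internal header)
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <vector>

// Returns the instructions that materialize Value in Reg, or an empty vector
// if the target cannot do it (the snippet setup is then marked incomplete).
static std::vector<llvm::MCInst>
materializeRegister(const llvm::MCSubtargetInfo &STI, unsigned Reg,
                    const llvm::APInt &Value) {
  const exegesis::ExegesisTarget *ET =
      exegesis::ExegesisTarget::lookup(llvm::Triple("x86_64-unknown-linux"));
  // New argument order introduced by this patch: (STI, Reg, Value) instead of
  // the old setRegToConstant / setRegTo(STI, Value, Reg).
  return ET ? ET->setRegTo(STI, Reg, Value) : std::vector<llvm::MCInst>();
}

For EFLAGS on X86 this expands to a stack allocation, two MOV32mi stores, and a POPF64, which is what the new SetFlags unit test below checks.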

Reviewers: courbet, javed.absar

Subscribers: mgorny, tschuett, llvm-commits

Differential Revision: https://reviews.llvm.org/D51856

llvm-svn: 342466
Guillaume Chatelet 2018-09-18 11:26:48 +00:00
parent 369049ce40
commit 52b299b2e1
16 changed files with 189 additions and 139 deletions

View File

@ -9,6 +9,7 @@
#include "../Target.h"
#include "../Latency.h"
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
namespace exegesis {
@ -26,33 +27,51 @@ private:
}
};
namespace {
static unsigned getLoadImmediateOpcode(unsigned RegBitWidth) {
switch (RegBitWidth) {
case 32:
return llvm::AArch64::MOVi32imm;
case 64:
return llvm::AArch64::MOVi64imm;
}
llvm_unreachable("Invalid Value Width");
}
// Generates an instruction to load an immediate value into a register.
static llvm::MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth,
const llvm::APInt &Value) {
if (Value.getBitWidth() > RegBitWidth)
llvm_unreachable("Value must fit in the Register");
return llvm::MCInstBuilder(getLoadImmediateOpcode(RegBitWidth))
.addReg(Reg)
.addImm(Value.getZExtValue());
}
} // namespace
class ExegesisAArch64Target : public ExegesisTarget {
std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
const llvm::APInt &Value,
unsigned Reg) const override {
llvm_unreachable("Not yet implemented");
}
unsigned getScratchMemoryRegister(const llvm::Triple &) const override {
llvm_unreachable("Not yet implemented");
}
void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg,
unsigned Offset) const override {
llvm_unreachable("Not yet implemented");
}
unsigned getMaxMemoryAccessSize() const override {
llvm_unreachable("Not yet implemented");
unsigned Reg,
const llvm::APInt &Value) const override {
if (llvm::AArch64::GPR32RegClass.contains(Reg))
return {loadImmediate(Reg, 32, Value)};
if (llvm::AArch64::GPR64RegClass.contains(Reg))
return {loadImmediate(Reg, 64, Value)};
llvm::errs() << "setRegTo is not implemented, results will be unreliable\n";
return {};
}
bool matchesArch(llvm::Triple::ArchType Arch) const override {
return Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be;
}
void addTargetSpecificPasses(llvm::PassManagerBase &PM) const override {
// Function return is a pseudo-instruction that needs to be expanded
PM.add(llvm::createAArch64ExpandPseudoPass());
}
std::unique_ptr<BenchmarkRunner>
createLatencyBenchmarkRunner(const LLVMState &State) const override {
return llvm::make_unique<AArch64LatencyBenchmarkRunner>(State);

View File

@ -29,18 +29,18 @@ static constexpr const char ModuleID[] = "ExegesisInfoTest";
static constexpr const char FunctionID[] = "foo";
static std::vector<llvm::MCInst>
generateSnippetSetupCode(const llvm::ArrayRef<unsigned> RegsToDef,
const ExegesisTarget &ET,
const llvm::LLVMTargetMachine &TM, bool &IsComplete) {
IsComplete = true;
generateSnippetSetupCode(const ExegesisTarget &ET,
const llvm::MCSubtargetInfo *const MSI,
llvm::ArrayRef<RegisterValue> RegisterInitialValues,
bool &IsSnippetSetupComplete) {
std::vector<llvm::MCInst> Result;
// for (const unsigned Reg : RegsToDef) {
// // Load a constant in the register.
// const auto Code = ET.setRegToConstant(*TM.getMCSubtargetInfo(), Reg);
// if (Code.empty())
// IsComplete = false;
// Result.insert(Result.end(), Code.begin(), Code.end());
// }
for (const RegisterValue &RV : RegisterInitialValues) {
// Load a constant in the register.
const auto SetRegisterCode = ET.setRegTo(*MSI, RV.Register, RV.Value);
if (SetRegisterCode.empty())
IsSnippetSetupComplete = false;
Result.insert(Result.end(), SetRegisterCode.begin(), SetRegisterCode.end());
}
return Result;
}
@ -149,7 +149,7 @@ llvm::BitVector getFunctionReservedRegs(const llvm::TargetMachine &TM) {
void assembleToStream(const ExegesisTarget &ET,
std::unique_ptr<llvm::LLVMTargetMachine> TM,
llvm::ArrayRef<unsigned> LiveIns,
llvm::ArrayRef<unsigned> RegsToDef,
llvm::ArrayRef<RegisterValue> RegisterInitialValues,
llvm::ArrayRef<llvm::MCInst> Instructions,
llvm::raw_pwrite_stream &AsmStream) {
std::unique_ptr<llvm::LLVMContext> Context =
@ -171,13 +171,12 @@ void assembleToStream(const ExegesisTarget &ET,
MF.getRegInfo().addLiveIn(Reg);
bool IsSnippetSetupComplete = false;
std::vector<llvm::MCInst> SnippetWithSetup =
generateSnippetSetupCode(RegsToDef, ET, *TM, IsSnippetSetupComplete);
if (!SnippetWithSetup.empty()) {
SnippetWithSetup.insert(SnippetWithSetup.end(), Instructions.begin(),
Instructions.end());
Instructions = SnippetWithSetup;
}
std::vector<llvm::MCInst> Code =
generateSnippetSetupCode(ET, TM->getMCSubtargetInfo(),
RegisterInitialValues, IsSnippetSetupComplete);
Code.insert(Code.end(), Instructions.begin(), Instructions.end());
// If the snippet setup is not complete, we disable liveness tracking. This
// means that we won't know what values are in the registers.
if (!IsSnippetSetupComplete)
@ -188,7 +187,7 @@ void assembleToStream(const ExegesisTarget &ET,
MF.getRegInfo().freezeReservedRegs(MF);
// Fill the MachineFunction from the instructions.
fillMachineFunction(MF, LiveIns, Instructions);
fillMachineFunction(MF, LiveIns, Code);
// We create the pass manager, run the passes to populate AsmBuffer.
llvm::MCContext &MCContext = MMI->getContext();

View File

@ -39,6 +39,12 @@ class ExegesisTarget;
// convention and target machine).
llvm::BitVector getFunctionReservedRegs(const llvm::TargetMachine &TM);
// A simple object storing the value for a particular register.
struct RegisterValue {
unsigned Register;
llvm::APInt Value;
};
// Creates a temporary `void foo(char*)` function containing the provided
// Instructions. Runs a set of llvm Passes to provide correct prologue and
// epilogue. Once the MachineFunction is ready, it is assembled for TM to
@ -46,7 +52,7 @@ llvm::BitVector getFunctionReservedRegs(const llvm::TargetMachine &TM);
void assembleToStream(const ExegesisTarget &ET,
std::unique_ptr<llvm::LLVMTargetMachine> TM,
llvm::ArrayRef<unsigned> LiveIns,
llvm::ArrayRef<unsigned> RegsToDef,
llvm::ArrayRef<RegisterValue> RegisterInitialValues,
llvm::ArrayRef<llvm::MCInst> Instructions,
llvm::raw_pwrite_stream &AsmStream);

View File

@ -23,7 +23,7 @@ struct BenchmarkCode {
// Before the code is executed, some instructions are added to set up the
// registers' initial values.
std::vector<unsigned> RegsToDef;
std::vector<RegisterValue> RegisterInitialValues;
// We also need to provide the registers that are live on entry for the
// assembler to generate proper prologue/epilogue.

View File

@ -104,7 +104,7 @@ BenchmarkRunner::writeObjectFile(const BenchmarkCode &BC,
return std::move(E);
llvm::raw_fd_ostream OFS(ResultFD, true /*ShouldClose*/);
assembleToStream(State.getExegesisTarget(), State.createTargetMachine(),
BC.LiveIns, BC.RegsToDef, Code, OFS);
BC.LiveIns, BC.RegisterInitialValues, Code, OFS);
return ResultPath.str();
}

View File

@ -49,7 +49,7 @@ SnippetGenerator::generateConfigurations(unsigned Opcode) const {
}
if (CT.ScratchSpacePointerInReg)
BC.LiveIns.push_back(CT.ScratchSpacePointerInReg);
BC.RegsToDef = computeRegsToDef(CT.Instructions);
BC.RegisterInitialValues = computeRegisterInitialValues(CT.Instructions);
Output.push_back(std::move(BC));
}
return Output;
@ -57,14 +57,14 @@ SnippetGenerator::generateConfigurations(unsigned Opcode) const {
return E.takeError();
}
std::vector<unsigned> SnippetGenerator::computeRegsToDef(
std::vector<RegisterValue> SnippetGenerator::computeRegisterInitialValues(
const std::vector<InstructionBuilder> &Instructions) const {
// Collect all register uses and create an assignment for each of them.
// Ignore memory operands which are handled separately.
// Loop invariant: DefinedRegs[i] is true iff it has been set at least once
// before the current instruction.
llvm::BitVector DefinedRegs = RATC.emptyRegisters();
std::vector<unsigned> RegsToDef;
std::vector<RegisterValue> RIV;
for (const InstructionBuilder &IB : Instructions) {
// Returns the register that this Operand sets or uses, or 0 if this is not
// a register.
@ -82,7 +82,7 @@ std::vector<unsigned> SnippetGenerator::computeRegsToDef(
if (!Op.IsDef) {
const unsigned Reg = GetOpReg(Op);
if (Reg > 0 && !DefinedRegs.test(Reg)) {
RegsToDef.push_back(Reg);
RIV.push_back(RegisterValue{Reg, llvm::APInt()});
DefinedRegs.set(Reg);
}
}
@ -96,7 +96,7 @@ std::vector<unsigned> SnippetGenerator::computeRegsToDef(
}
}
}
return RegsToDef;
return RIV;
}
llvm::Expected<CodeTemplate> SnippetGenerator::generateSelfAliasingCodeTemplate(

View File

@ -48,8 +48,8 @@ public:
generateConfigurations(unsigned Opcode) const;
// Given a snippet, computes which registers the setup code needs to define.
std::vector<unsigned>
computeRegsToDef(const std::vector<InstructionBuilder> &Snippet) const;
std::vector<RegisterValue> computeRegisterInitialValues(
const std::vector<InstructionBuilder> &Snippet) const;
protected:
const LLVMState &State;

View File

@ -90,21 +90,8 @@ namespace {
class ExegesisDefaultTarget : public ExegesisTarget {
private:
std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
const llvm::APInt &Value,
unsigned Reg) const override {
llvm_unreachable("Not yet implemented");
}
unsigned getScratchMemoryRegister(const llvm::Triple &) const override {
llvm_unreachable("Not yet implemented");
}
void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg,
unsigned Offset) const override {
llvm_unreachable("Not yet implemented");
}
unsigned getMaxMemoryAccessSize() const override {
unsigned Reg,
const llvm::APInt &Value) const override {
llvm_unreachable("Not yet implemented");
}

View File

@ -36,25 +36,31 @@ public:
virtual void addTargetSpecificPasses(llvm::PassManagerBase &PM) const {}
// Generates code to move a constant into the given register.
virtual std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
const llvm::APInt &Value,
unsigned Reg) const = 0;
// Precondition: Value must fit into Reg.
virtual std::vector<llvm::MCInst>
setRegTo(const llvm::MCSubtargetInfo &STI, unsigned Reg,
const llvm::APInt &Value) const = 0;
// Returns the register pointing to scratch memory, or 0 if this target
// does not support memory operands. The benchmark function uses the
// default calling convention.
virtual unsigned getScratchMemoryRegister(const llvm::Triple &) const = 0;
virtual unsigned getScratchMemoryRegister(const llvm::Triple &) const {
return 0;
}
// Fills memory operands with references to the address at [Reg] + Offset.
virtual void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg,
unsigned Offset) const = 0;
unsigned Offset) const {
llvm_unreachable(
"fillMemoryOperands() requires getScratchMemoryRegister() > 0");
}
// Returns the maximum number of bytes a load/store instruction can access at
// once. This is typically the size of the largest register available on the
// processor. Note that this is only used as a hint to generate independent
// load/stores to/from memory, so the exact returned value does not really
// matter as long as it's large enough.
virtual unsigned getMaxMemoryAccessSize() const = 0;
virtual unsigned getMaxMemoryAccessSize() const { return 0; }
// Creates a snippet generator for the given mode.
std::unique_ptr<SnippetGenerator>

View File

@ -101,8 +101,8 @@ protected:
}
};
static unsigned GetLoadImmediateOpcode(const llvm::APInt &Value) {
switch (Value.getBitWidth()) {
static unsigned GetLoadImmediateOpcode(unsigned RegBitWidth) {
switch (RegBitWidth) {
case 8:
return llvm::X86::MOV8ri;
case 16:
@ -115,10 +115,12 @@ static unsigned GetLoadImmediateOpcode(const llvm::APInt &Value) {
llvm_unreachable("Invalid Value Width");
}
static llvm::MCInst loadImmediate(unsigned Reg, const llvm::APInt &Value,
unsigned MaxBitWidth) {
assert(Value.getBitWidth() <= MaxBitWidth && "Value too big to fit register");
return llvm::MCInstBuilder(GetLoadImmediateOpcode(Value))
// Generates an instruction to load an immediate value into a register.
static llvm::MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth,
const llvm::APInt &Value) {
if (Value.getBitWidth() > RegBitWidth)
llvm_unreachable("Value must fit in the Register");
return llvm::MCInstBuilder(GetLoadImmediateOpcode(RegBitWidth))
.addReg(Reg)
.addImm(Value.getZExtValue());
}
@ -165,6 +167,8 @@ static llvm::MCInst releaseStackSpace(unsigned Bytes) {
.addImm(Bytes);
}
// Reserves some space on the stack, fills it with the content of the provided
// constant, and provides methods to load the stack value into a register.
struct ConstantInliner {
explicit ConstantInliner(const llvm::APInt &Constant)
: StackSize(Constant.getBitWidth() / 8) {
@ -187,17 +191,19 @@ struct ConstantInliner {
Constant.extractBits(8, ByteOffset * 8).getZExtValue()));
}
std::vector<llvm::MCInst> loadAndFinalize(unsigned Reg, unsigned Opcode,
unsigned BitWidth) {
assert(StackSize * 8 == BitWidth && "Value does not have the correct size");
std::vector<llvm::MCInst> loadAndFinalize(unsigned Reg, unsigned RegBitWidth,
unsigned Opcode) {
assert(StackSize * 8 == RegBitWidth &&
"Value does not have the correct size");
add(loadToReg(Reg, Opcode));
add(releaseStackSpace(StackSize));
return std::move(Instructions);
}
std::vector<llvm::MCInst> loadX87AndFinalize(unsigned Reg, unsigned Opcode,
unsigned BitWidth) {
assert(StackSize * 8 == BitWidth && "Value does not have the correct size");
std::vector<llvm::MCInst>
loadX87AndFinalize(unsigned Reg, unsigned RegBitWidth, unsigned Opcode) {
assert(StackSize * 8 == RegBitWidth &&
"Value does not have the correct size");
add(llvm::MCInstBuilder(Opcode)
.addReg(llvm::X86::RSP) // BaseReg
.addImm(1) // ScaleAmt
@ -211,7 +217,7 @@ struct ConstantInliner {
}
std::vector<llvm::MCInst> popFlagAndFinalize() {
assert(StackSize * 8 == 32 && "Value does not have the correct size");
assert(StackSize * 8 == 64 && "Value does not have the correct size");
add(llvm::MCInstBuilder(llvm::X86::POPF64));
return std::move(Instructions);
}
@ -275,46 +281,46 @@ class ExegesisX86Target : public ExegesisTarget {
}
std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
const llvm::APInt &Value,
unsigned Reg) const override {
unsigned Reg,
const llvm::APInt &Value) const override {
if (llvm::X86::GR8RegClass.contains(Reg))
return {loadImmediate(Reg, Value, 8)};
return {loadImmediate(Reg, 8, Value)};
if (llvm::X86::GR16RegClass.contains(Reg))
return {loadImmediate(Reg, Value, 16)};
return {loadImmediate(Reg, 16, Value)};
if (llvm::X86::GR32RegClass.contains(Reg))
return {loadImmediate(Reg, Value, 32)};
return {loadImmediate(Reg, 32, Value)};
if (llvm::X86::GR64RegClass.contains(Reg))
return {loadImmediate(Reg, Value, 64)};
return {loadImmediate(Reg, 64, Value)};
ConstantInliner CI(Value);
if (llvm::X86::VR64RegClass.contains(Reg))
return CI.loadAndFinalize(Reg, llvm::X86::MMX_MOVQ64rm, 64);
return CI.loadAndFinalize(Reg, 64, llvm::X86::MMX_MOVQ64rm);
if (llvm::X86::VR128XRegClass.contains(Reg)) {
if (STI.getFeatureBits()[llvm::X86::FeatureAVX512])
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Z128rm, 128);
return CI.loadAndFinalize(Reg, 128, llvm::X86::VMOVDQU32Z128rm);
if (STI.getFeatureBits()[llvm::X86::FeatureAVX])
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQUrm, 128);
return CI.loadAndFinalize(Reg, llvm::X86::MOVDQUrm, 128);
return CI.loadAndFinalize(Reg, 128, llvm::X86::VMOVDQUrm);
return CI.loadAndFinalize(Reg, 128, llvm::X86::MOVDQUrm);
}
if (llvm::X86::VR256XRegClass.contains(Reg)) {
if (STI.getFeatureBits()[llvm::X86::FeatureAVX512])
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Z256rm, 256);
return CI.loadAndFinalize(Reg, 256, llvm::X86::VMOVDQU32Z256rm);
if (STI.getFeatureBits()[llvm::X86::FeatureAVX])
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQUYrm, 256);
return CI.loadAndFinalize(Reg, 256, llvm::X86::VMOVDQUYrm);
}
if (llvm::X86::VR512RegClass.contains(Reg))
if (STI.getFeatureBits()[llvm::X86::FeatureAVX512])
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Zrm, 512);
return CI.loadAndFinalize(Reg, 512, llvm::X86::VMOVDQU32Zrm);
if (llvm::X86::RSTRegClass.contains(Reg)) {
if (Value.getBitWidth() == 32)
return CI.loadX87AndFinalize(Reg, llvm::X86::LD_F32m, 32);
return CI.loadX87AndFinalize(Reg, 32, llvm::X86::LD_F32m);
if (Value.getBitWidth() == 64)
return CI.loadX87AndFinalize(Reg, llvm::X86::LD_F64m, 64);
return CI.loadX87AndFinalize(Reg, 64, llvm::X86::LD_F64m);
if (Value.getBitWidth() == 80)
return CI.loadX87AndFinalize(Reg, llvm::X86::LD_F80m, 80);
return CI.loadX87AndFinalize(Reg, 80, llvm::X86::LD_F80m);
}
if (Reg == llvm::X86::EFLAGS)
return CI.popFlagAndFinalize();
llvm_unreachable("Not yet implemented");
return {}; // Not yet implemented.
}
std::unique_ptr<SnippetGenerator>

View File

@ -15,11 +15,16 @@ void InitializeAArch64ExegesisTarget();
namespace {
using llvm::APInt;
using llvm::MCInst;
using testing::Gt;
using testing::IsEmpty;
using testing::Not;
using testing::NotNull;
using testing::SizeIs;
constexpr const char kTriple[] = "aarch64-unknown-linux";
constexpr const char kGenericCpu[] = "generic";
constexpr const char kNoFeatures[] = "";
class AArch64TargetTest : public ::testing::Test {
protected:
@ -29,7 +34,10 @@ protected:
std::string error;
Target_ = llvm::TargetRegistry::lookupTarget(kTriple, error);
EXPECT_THAT(Target_, NotNull());
STI_.reset(
Target_->createMCSubtargetInfo(kTriple, kGenericCpu, kNoFeatures));
}
static void SetUpTestCase() {
LLVMInitializeAArch64TargetInfo();
LLVMInitializeAArch64Target();
@ -37,9 +45,20 @@ protected:
InitializeAArch64ExegesisTarget();
}
std::vector<MCInst> setRegTo(unsigned Reg, const APInt &Value) {
return ExegesisTarget_->setRegTo(*STI_, Reg, Value);
}
const llvm::Target *Target_;
const ExegesisTarget *const ExegesisTarget_;
std::unique_ptr<llvm::MCSubtargetInfo> STI_;
};
TEST_F(AArch64TargetTest, SetRegToConstant) {
// The AArch64 target currently doesn't know how to set register values.
const auto Insts = setRegTo(llvm::AArch64::X0, llvm::APInt());
EXPECT_THAT(Insts, Not(IsEmpty()));
}
} // namespace
} // namespace exegesis

View File

@ -30,12 +30,11 @@ protected:
};
TEST_F(ARMMachineFunctionGeneratorTest, DISABLED_JitFunction) {
Check(ExegesisTarget::getDefault(), {}, llvm::MCInst(), 0x1e, 0xff, 0x2f,
0xe1);
Check({}, llvm::MCInst(), 0x1e, 0xff, 0x2f, 0xe1);
}
TEST_F(ARMMachineFunctionGeneratorTest, DISABLED_JitFunctionADDrr) {
Check(ExegesisTarget::getDefault(), {llvm::ARM::R0},
Check({{llvm::ARM::R0, llvm::APInt()}},
MCInstBuilder(llvm::ARM::ADDrr)
.addReg(llvm::ARM::R0)
.addReg(llvm::ARM::R0)

View File

@ -32,7 +32,9 @@ protected:
const std::string &CpuName)
: TT(TT), CpuName(CpuName),
CanExecute(llvm::Triple(TT).getArch() ==
llvm::Triple(llvm::sys::getProcessTriple()).getArch()) {
llvm::Triple(llvm::sys::getProcessTriple()).getArch()),
ET(ExegesisTarget::lookup(llvm::Triple(TT))) {
assert(ET);
if (!CanExecute) {
llvm::outs() << "Skipping execution, host:"
<< llvm::sys::getProcessTriple() << ", target:" << TT
@ -41,12 +43,12 @@ protected:
}
template <class... Bs>
inline void Check(const ExegesisTarget &ET,
llvm::ArrayRef<unsigned> RegsToDef, llvm::MCInst MCInst,
Bs... Bytes) {
inline void Check(llvm::ArrayRef<RegisterValue> RegisterInitialValues,
llvm::MCInst MCInst, Bs... Bytes) {
ExecutableFunction Function =
(MCInst.getOpcode() == 0) ? assembleToFunction(ET, RegsToDef, {})
: assembleToFunction(ET, RegsToDef, {MCInst});
(MCInst.getOpcode() == 0)
? assembleToFunction(RegisterInitialValues, {})
: assembleToFunction(RegisterInitialValues, {MCInst});
ASSERT_THAT(Function.getFunctionBytes().str(),
testing::ElementsAre(Bytes...));
if (CanExecute) {
@ -70,14 +72,12 @@ private:
}
ExecutableFunction
assembleToFunction(const ExegesisTarget &ET,
llvm::ArrayRef<unsigned> RegsToDef,
assembleToFunction(llvm::ArrayRef<RegisterValue> RegisterInitialValues,
llvm::ArrayRef<llvm::MCInst> Instructions) {
llvm::SmallString<256> Buffer;
llvm::raw_svector_ostream AsmStream(Buffer);
assembleToStream(ET, createTargetMachine(), /*LiveIns=*/{},
RegsToDef, Instructions,
AsmStream);
assembleToStream(*ET, createTargetMachine(), /*LiveIns=*/{},
RegisterInitialValues, Instructions, AsmStream);
return ExecutableFunction(createTargetMachine(),
getObjectFromBuffer(AsmStream.str()));
}
@ -85,6 +85,7 @@ private:
const std::string TT;
const std::string CpuName;
const bool CanExecute;
const ExegesisTarget *const ET;
};
} // namespace exegesis

View File

@ -39,19 +39,12 @@ protected:
};
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunction) {
Check(ExegesisTarget::getDefault(), {}, llvm::MCInst(), 0xc3);
}
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionXOR32rr_Default) {
Check(ExegesisTarget::getDefault(), {EAX},
MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX), 0x31, 0xc0,
0xc3);
Check({}, llvm::MCInst(), 0xc3);
}
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionXOR32rr_X86) {
const auto *ET = ExegesisTarget::lookup(llvm::Triple("x86_64-unknown-linux"));
ASSERT_NE(ET, nullptr);
Check(*ET, {EAX}, MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX),
Check({{EAX, llvm::APInt(32, 1)}},
MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX),
// mov eax, 1
0xb8, 0x01, 0x00, 0x00, 0x00,
// xor eax, eax
@ -59,15 +52,13 @@ TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionXOR32rr_X86) {
}
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionMOV64ri) {
Check(ExegesisTarget::getDefault(), {},
MCInstBuilder(MOV64ri32).addReg(RAX).addImm(42), 0x48, 0xc7, 0xc0, 0x2a,
0x00, 0x00, 0x00, 0xc3);
Check({}, MCInstBuilder(MOV64ri32).addReg(RAX).addImm(42), 0x48, 0xc7, 0xc0,
0x2a, 0x00, 0x00, 0x00, 0xc3);
}
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionMOV32ri) {
Check(ExegesisTarget::getDefault(), {},
MCInstBuilder(MOV32ri).addReg(EAX).addImm(42), 0xb8, 0x2a, 0x00, 0x00,
0x00, 0xc3);
Check({}, MCInstBuilder(MOV32ri).addReg(EAX).addImm(42), 0xb8, 0x2a, 0x00,
0x00, 0x00, 0xc3);
}
} // namespace

View File

@ -261,7 +261,13 @@ private:
using FakeSnippetGeneratorTest = SnippetGeneratorTest<FakeSnippetGenerator>;
TEST_F(FakeSnippetGeneratorTest, ComputeRegsToDefAdd16ri) {
testing::Matcher<const RegisterValue &> IsRegisterValue(unsigned Reg,
llvm::APInt Value) {
return testing::AllOf(testing::Field(&RegisterValue::Register, Reg),
testing::Field(&RegisterValue::Value, Value));
}
TEST_F(FakeSnippetGeneratorTest, ComputeRegisterInitialValuesAdd16ri) {
// ADD16ri:
// explicit def 0 : reg RegClass=GR16
// explicit use 1 : reg RegClass=GR16 | TIED_TO:0
@ -272,11 +278,11 @@ TEST_F(FakeSnippetGeneratorTest, ComputeRegsToDefAdd16ri) {
llvm::MCOperand::createReg(llvm::X86::AX);
std::vector<InstructionBuilder> Snippet;
Snippet.push_back(std::move(IB));
const auto RegsToDef = Generator.computeRegsToDef(Snippet);
EXPECT_THAT(RegsToDef, UnorderedElementsAre(llvm::X86::AX));
const auto RIV = Generator.computeRegisterInitialValues(Snippet);
EXPECT_THAT(RIV, ElementsAre(IsRegisterValue(llvm::X86::AX, llvm::APInt())));
}
TEST_F(FakeSnippetGeneratorTest, ComputeRegsToDefAdd64rr) {
TEST_F(FakeSnippetGeneratorTest, ComputeRegisterInitialValuesAdd64rr) {
// ADD64rr:
// mov64ri rax, 42
// add64rr rax, rax, rbx
@ -298,8 +304,8 @@ TEST_F(FakeSnippetGeneratorTest, ComputeRegsToDefAdd64rr) {
Snippet.push_back(std::move(Add));
}
const auto RegsToDef = Generator.computeRegsToDef(Snippet);
EXPECT_THAT(RegsToDef, UnorderedElementsAre(llvm::X86::RBX));
const auto RIV = Generator.computeRegisterInitialValues(Snippet);
EXPECT_THAT(RIV, ElementsAre(IsRegisterValue(llvm::X86::RBX, llvm::APInt())));
}
} // namespace

View File

@ -125,7 +125,7 @@ protected:
}
std::vector<MCInst> setRegTo(unsigned Reg, const APInt &Value) {
return ExegesisTarget_->setRegTo(*STI_, Value, Reg);
return ExegesisTarget_->setRegTo(*STI_, Reg, Value);
}
const llvm::Target *Target_;
@ -137,6 +137,16 @@ using Core2TargetTest = X86TargetTest<kCpuCore2, kFeaturesEmpty>;
using Core2AvxTargetTest = X86TargetTest<kCpuCore2, kFeaturesAvx>;
using Core2Avx512TargetTest = X86TargetTest<kCpuCore2, kFeaturesAvx512VL>;
TEST_F(Core2TargetTest, SetFlags) {
const unsigned Reg = llvm::X86::EFLAGS;
EXPECT_THAT(
setRegTo(Reg, APInt(64, 0x1111222233334444ULL)),
ElementsAre(IsStackAllocate(8),
IsMovValueToStack(llvm::X86::MOV32mi, 0x33334444UL, 0),
IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 4),
OpcodeIs(llvm::X86::POPF64)));
}
TEST_F(Core2TargetTest, SetRegToGR8Value) {
const uint8_t Value = 0xFFU;
const unsigned Reg = llvm::X86::AL;
@ -285,7 +295,7 @@ TEST_F(Core2TargetTest, SetRegToST0_32Bits) {
setRegTo(llvm::X86::ST0, APInt(32, 0x11112222ULL)),
ElementsAre(IsStackAllocate(4),
IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 0),
testing::A<MCInst>(), IsStackDeallocate(4)));
OpcodeIs(llvm::X86::LD_F32m), IsStackDeallocate(4)));
}
TEST_F(Core2TargetTest, SetRegToST1_32Bits) {
@ -295,7 +305,8 @@ TEST_F(Core2TargetTest, SetRegToST1_32Bits) {
setRegTo(llvm::X86::ST1, APInt(32, 0x11112222ULL)),
ElementsAre(IsStackAllocate(4),
IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 0),
testing::A<MCInst>(), CopySt0ToSt1, IsStackDeallocate(4)));
OpcodeIs(llvm::X86::LD_F32m), CopySt0ToSt1,
IsStackDeallocate(4)));
}
TEST_F(Core2TargetTest, SetRegToST0_64Bits) {
@ -304,7 +315,7 @@ TEST_F(Core2TargetTest, SetRegToST0_64Bits) {
ElementsAre(IsStackAllocate(8),
IsMovValueToStack(llvm::X86::MOV32mi, 0x33334444UL, 0),
IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 4),
testing::A<MCInst>(), IsStackDeallocate(8)));
OpcodeIs(llvm::X86::LD_F64m), IsStackDeallocate(8)));
}
TEST_F(Core2TargetTest, SetRegToST0_80Bits) {
@ -314,7 +325,7 @@ TEST_F(Core2TargetTest, SetRegToST0_80Bits) {
IsMovValueToStack(llvm::X86::MOV32mi, 0x44445555UL, 0),
IsMovValueToStack(llvm::X86::MOV32mi, 0x22223333UL, 4),
IsMovValueToStack(llvm::X86::MOV16mi, 0x1111UL, 8),
testing::A<MCInst>(), IsStackDeallocate(10)));
OpcodeIs(llvm::X86::LD_F80m), IsStackDeallocate(10)));
}
} // namespace