Mirror of https://github.com/RPCS3/llvm-mirror.git
Commit ac11cfc716
This also adds new interfaces for the fixed- and scalable case:
* LLT::fixed_vector
* LLT::scalable_vector

The strategy for migrating to the new interfaces was as follows:
* If the new LLT is a (modified) clone of another LLT, taking the same number
  of elements, then use LLT::vector(OtherTy.getElementCount()), or, if the
  number of elements is halved/doubled, use .divideCoefficientBy(2) or
  operator*. That is because there is no reason to specifically restrict the
  types to 'fixed_vector'.
* If the algorithm works on the number of elements (as unsigned), then just
  use fixed_vector. This will need to be fixed up in the future when the
  algorithm is modified to also work for scalable vectors, and will then need
  additional tests to confirm that the behaviour works the same for scalable
  vectors.
* If the test used the '/*Scalable=*/true' flag of LLT::vector, then this is
  replaced by LLT::scalable_vector.

Reviewed By: aemerson

Differential Revision: https://reviews.llvm.org/D104451
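For reference, a minimal sketch of how the new interfaces are used, based on this
commit message and the test below. The helper function, variable names, element
counts, and the header path are illustrative assumptions, not taken from the patch:

#include "llvm/Support/LowLevelTypeImpl.h" // declares LLT (path as of this commit; illustrative)

using namespace llvm;

// Hypothetical helper, for illustration only.
static void exampleNewLLTInterfaces() {
  LLT S32 = LLT::scalar(32);

  // New entry points for the fixed- and scalable case.
  LLT V2S32 = LLT::fixed_vector(2, S32);      // <2 x s32>
  LLT NxV2S32 = LLT::scalable_vector(2, S32); // <vscale x 2 x s32>

  // Clone another LLT while keeping its element count (fixed or scalable), so
  // the code is not restricted to 'fixed_vector'.
  LLT Widened = LLT::vector(V2S32.getElementCount(), LLT::scalar(64));

  // Halve or double the element count of an existing vector type.
  LLT Halved = LLT::vector(NxV2S32.getElementCount().divideCoefficientBy(2), S32);
  LLT Doubled = LLT::vector(V2S32.getElementCount() * 2, S32);

  (void)Widened;
  (void)Halved;
  (void)Doubled;
}

The fixed_vector form is the one exercised in CSETest.cpp below (e.g.
LLT::fixed_vector(2, s32) for the splat-constant checks).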
167 lines
6.3 KiB
C++
//===- CSETest.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "gtest/gtest.h"

namespace {

TEST_F(AArch64GISelMITest, TestCSE) {
  setUp();
  if (!TM)
    return;

  LLT s16{LLT::scalar(16)};
  LLT s32{LLT::scalar(32)};
  auto MIBInput = B.buildInstr(TargetOpcode::G_TRUNC, {s16}, {Copies[0]});
  auto MIBInput1 = B.buildInstr(TargetOpcode::G_TRUNC, {s16}, {Copies[1]});
  auto MIBAdd = B.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
  GISelCSEInfo CSEInfo;
  CSEInfo.setCSEConfig(std::make_unique<CSEConfigFull>());
  CSEInfo.analyze(*MF);
  B.setCSEInfo(&CSEInfo);
  CSEMIRBuilder CSEB(B.getState());

  CSEB.setInsertPt(B.getMBB(), B.getInsertPt());
  Register AddReg = MRI->createGenericVirtualRegister(s16);
  auto MIBAddCopy =
      CSEB.buildInstr(TargetOpcode::G_ADD, {AddReg}, {MIBInput, MIBInput});
  EXPECT_EQ(MIBAddCopy->getOpcode(), TargetOpcode::COPY);
  auto MIBAdd2 =
      CSEB.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
  EXPECT_TRUE(&*MIBAdd == &*MIBAdd2);
  auto MIBAdd4 =
      CSEB.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
  EXPECT_TRUE(&*MIBAdd == &*MIBAdd4);
  auto MIBAdd5 =
      CSEB.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput1});
  EXPECT_TRUE(&*MIBAdd != &*MIBAdd5);

  // Try building G_CONSTANTS.
  auto MIBCst = CSEB.buildConstant(s32, 0);
  auto MIBCst1 = CSEB.buildConstant(s32, 0);
  EXPECT_TRUE(&*MIBCst == &*MIBCst1);
  // Try the CFing of BinaryOps.
  auto MIBCF1 = CSEB.buildInstr(TargetOpcode::G_ADD, {s32}, {MIBCst, MIBCst});
  EXPECT_TRUE(&*MIBCF1 == &*MIBCst);

  // Try out building FCONSTANTs.
  auto MIBFP0 = CSEB.buildFConstant(s32, 1.0);
  auto MIBFP0_1 = CSEB.buildFConstant(s32, 1.0);
  EXPECT_TRUE(&*MIBFP0 == &*MIBFP0_1);
  CSEInfo.print();

  // Make sure buildConstant with a vector type doesn't crash, and the elements
  // CSE.
  auto Splat0 = CSEB.buildConstant(LLT::fixed_vector(2, s32), 0);
  EXPECT_EQ(TargetOpcode::G_BUILD_VECTOR, Splat0->getOpcode());
  EXPECT_EQ(Splat0.getReg(1), Splat0.getReg(2));
  EXPECT_EQ(&*MIBCst, MRI->getVRegDef(Splat0.getReg(1)));

  auto FSplat = CSEB.buildFConstant(LLT::fixed_vector(2, s32), 1.0);
  EXPECT_EQ(TargetOpcode::G_BUILD_VECTOR, FSplat->getOpcode());
  EXPECT_EQ(FSplat.getReg(1), FSplat.getReg(2));
  EXPECT_EQ(&*MIBFP0, MRI->getVRegDef(FSplat.getReg(1)));

  // Check G_UNMERGE_VALUES
  auto MIBUnmerge = CSEB.buildUnmerge({s32, s32}, Copies[0]);
  auto MIBUnmerge2 = CSEB.buildUnmerge({s32, s32}, Copies[0]);
  EXPECT_TRUE(&*MIBUnmerge == &*MIBUnmerge2);

  // Check G_IMPLICIT_DEF
  auto Undef0 = CSEB.buildUndef(s32);
  auto Undef1 = CSEB.buildUndef(s32);
  EXPECT_EQ(&*Undef0, &*Undef1);

  // If the observer is installed on the MF, CSE can also
  // track new instructions built without the CSEBuilder, and
  // the newly built instructions are available for CSEing the next
  // time a build call is made through the CSEMIRBuilder.
  // Additionally, the CSE implementation lazily hashes instructions
  // (on every build call) to give the instruction a chance to be fully
  // built (say, using .addUse().addDef(), and so on).
  GISelObserverWrapper WrapperObserver(&CSEInfo);
  RAIIMFObsDelInstaller Installer(*MF, WrapperObserver);
  MachineIRBuilder RegularBuilder(*MF);
  RegularBuilder.setInsertPt(*EntryMBB, EntryMBB->begin());
  auto NonCSEFMul = RegularBuilder.buildInstr(TargetOpcode::G_AND)
                        .addDef(MRI->createGenericVirtualRegister(s32))
                        .addUse(Copies[0])
                        .addUse(Copies[1]);
  auto CSEFMul =
      CSEB.buildInstr(TargetOpcode::G_AND, {s32}, {Copies[0], Copies[1]});
  EXPECT_EQ(&*CSEFMul, &*NonCSEFMul);

  auto ExtractMIB = CSEB.buildInstr(TargetOpcode::G_EXTRACT, {s16},
                                    {Copies[0], static_cast<uint64_t>(0)});
  auto ExtractMIB1 = CSEB.buildInstr(TargetOpcode::G_EXTRACT, {s16},
                                     {Copies[0], static_cast<uint64_t>(0)});
  auto ExtractMIB2 = CSEB.buildInstr(TargetOpcode::G_EXTRACT, {s16},
                                     {Copies[0], static_cast<uint64_t>(1)});
  EXPECT_EQ(&*ExtractMIB, &*ExtractMIB1);
  EXPECT_NE(&*ExtractMIB, &*ExtractMIB2);
}

TEST_F(AArch64GISelMITest, TestCSEConstantConfig) {
  setUp();
  if (!TM)
    return;

  LLT s16{LLT::scalar(16)};
  auto MIBInput = B.buildInstr(TargetOpcode::G_TRUNC, {s16}, {Copies[0]});
  auto MIBAdd = B.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
  auto MIBZero = B.buildConstant(s16, 0);
  GISelCSEInfo CSEInfo;
  CSEInfo.setCSEConfig(std::make_unique<CSEConfigConstantOnly>());
  CSEInfo.analyze(*MF);
  B.setCSEInfo(&CSEInfo);
  CSEMIRBuilder CSEB(B.getState());
  CSEB.setInsertPt(*EntryMBB, EntryMBB->begin());
  auto MIBAdd1 =
      CSEB.buildInstr(TargetOpcode::G_ADD, {s16}, {MIBInput, MIBInput});
  // We should CSE constants only. Adds should not be CSEd.
  EXPECT_TRUE(MIBAdd1->getOpcode() != TargetOpcode::COPY);
  EXPECT_TRUE(&*MIBAdd1 != &*MIBAdd);
  // We should CSE the constant.
  auto MIBZeroTmp = CSEB.buildConstant(s16, 0);
  EXPECT_TRUE(&*MIBZero == &*MIBZeroTmp);

  // Check G_IMPLICIT_DEF
  auto Undef0 = CSEB.buildUndef(s16);
  auto Undef1 = CSEB.buildUndef(s16);
  EXPECT_EQ(&*Undef0, &*Undef1);
}

TEST_F(AArch64GISelMITest, TestCSEImmediateNextCSE) {
  setUp();
  if (!TM)
    return;

  LLT s32{LLT::scalar(32)};
  // We want to check that when the CSE hit is on the next instruction, i.e. at
  // the current insert pt, the insertion point is moved ahead of the
  // instruction.

  GISelCSEInfo CSEInfo;
  CSEInfo.setCSEConfig(std::make_unique<CSEConfigConstantOnly>());
  CSEInfo.analyze(*MF);
  B.setCSEInfo(&CSEInfo);
  CSEMIRBuilder CSEB(B.getState());
  CSEB.buildConstant(s32, 0);
  auto MIBCst2 = CSEB.buildConstant(s32, 2);

  // Move the insert point before the second constant.
  CSEB.setInsertPt(CSEB.getMBB(), --CSEB.getInsertPt());
  auto MIBCst3 = CSEB.buildConstant(s32, 2);
  EXPECT_TRUE(&*MIBCst2 == &*MIBCst3);
  EXPECT_TRUE(CSEB.getInsertPt() == CSEB.getMBB().end());
}

} // namespace