diff --git a/lib/Target/AMDGPU/GCNDPPCombine.cpp b/lib/Target/AMDGPU/GCNDPPCombine.cpp
index 1fa75504493..1b499849f24 100644
--- a/lib/Target/AMDGPU/GCNDPPCombine.cpp
+++ b/lib/Target/AMDGPU/GCNDPPCombine.cpp
@@ -105,6 +105,11 @@ public:
     MachineFunctionPass::getAnalysisUsage(AU);
   }
 
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+      .set(MachineFunctionProperties::Property::IsSSA);
+  }
+
 private:
   int getDPPOp(unsigned Op) const;
 };
@@ -564,8 +569,6 @@ bool GCNDPPCombine::runOnMachineFunction(MachineFunction &MF) {
   MRI = &MF.getRegInfo();
   TII = ST.getInstrInfo();
 
-  assert(MRI->isSSA() && "Must be run on SSA");
-
   bool Changed = false;
   for (auto &MBB : MF) {
     for (auto I = MBB.rbegin(), E = MBB.rend(); I != E;) {
diff --git a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index cec7f563f48..8e6c51deaa6 100644
--- a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -57,7 +57,6 @@ using namespace llvm;
 #define DEBUG_TYPE "vec-merger"
 
 static bool isImplicitlyDef(MachineRegisterInfo &MRI, unsigned Reg) {
-  assert(MRI.isSSA());
   if (Register::isPhysicalRegister(Reg))
     return false;
   const MachineInstr *MI = MRI.getUniqueVRegDef(Reg);
@@ -130,6 +129,11 @@ public:
     MachineFunctionPass::getAnalysisUsage(AU);
   }
 
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+      .set(MachineFunctionProperties::Property::IsSSA);
+  }
+
   StringRef getPassName() const override {
     return "R600 Vector Registers Merge Pass";
   }
diff --git a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 62a7d9cd0ed..140e5509b87 100644
--- a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -292,6 +292,11 @@ public:
 
     MachineFunctionPass::getAnalysisUsage(AU);
   }
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties()
+      .set(MachineFunctionProperties::Property::IsSSA);
+  }
 };
 
 static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
@@ -2165,8 +2170,6 @@ bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
   MRI = &MF.getRegInfo();
   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
 
-  assert(MRI->isSSA() && "Must be run on SSA");
-
   LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
 
   bool Modified = false;
diff --git a/test/CodeGen/AMDGPU/merge-load-store-vreg.mir b/test/CodeGen/AMDGPU/merge-load-store-vreg.mir
index 0f3913e9e45..5e13ed178aa 100644
--- a/test/CodeGen/AMDGPU/merge-load-store-vreg.mir
+++ b/test/CodeGen/AMDGPU/merge-load-store-vreg.mir
@@ -115,7 +115,7 @@ body: |
     %2:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %1, implicit $exec
     V_CMP_NE_U32_e32 1, %2, implicit-def $vcc, implicit $exec
     DS_WRITE_B32 %0.sub0, %0.sub0, 1024, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp)
-    undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec
+    %3:vreg_64 = V_LSHLREV_B64 0, 0, implicit $exec
     DS_WRITE_B32 %0.sub0, %3.sub0, 1056, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp1)
     %4:vgpr_32 = DS_READ_B32 %3.sub0, 1088, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp2)
     %5:vgpr_32 = DS_READ_B32 %3.sub0, 1120, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp3)
@@ -146,7 +146,7 @@ body: |
    %2:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %1, implicit $exec
     V_CMP_NE_U32_e32 1, %2, implicit-def $vcc, implicit $exec
     DS_WRITE_B32 %0.sub0, %0.sub0, 0, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp)
-    undef %3.sub0:vreg_64 = V_MOV_B32_e32 0, implicit $exec
+    %3:vreg_64 = V_LSHLREV_B64 0, 0, implicit $exec
     DS_WRITE_B32 %0.sub0, %3.sub0, 32, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp1)
     %4:vgpr_32 = DS_READ_B32 %3.sub0, 0, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp2)
     %5:vgpr_32 = DS_READ_B32 %3.sub0, 32, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp3)
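
Note: the three pass changes above apply the same pattern: instead of asserting MRI->isSSA() at runtime, the pass declares SSA form as a required MachineFunctionProperty, so the pass manager verifies the property before the pass runs. A minimal sketch of that pattern outside this patch, using a hypothetical pass name (MyMIRPass) purely for illustration:

// Sketch only; "MyMIRPass" is not part of the patch above.
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

namespace {
class MyMIRPass : public MachineFunctionPass {
public:
  static char ID;
  MyMIRPass() : MachineFunctionPass(ID) {}

  // Declaring IsSSA here lets the pass manager reject any pipeline that
  // reaches this pass after SSA has been destroyed, replacing an
  // assert(MRI->isSSA()) inside runOnMachineFunction().
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
      .set(MachineFunctionProperties::Property::IsSSA);
  }

  bool runOnMachineFunction(MachineFunction &MF) override { return false; }
};
} // end anonymous namespace

char MyMIRPass::ID = 0;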