mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-10-19 11:02:59 +02:00
[lanai] Add areMemAccessesTriviallyDisjoint, getMemOpBaseRegImmOfs and getMemOpBaseRegImmOfsWidth.
Summary: Add getMemOpBaseRegImmOfsWidth to enable determining independence during MiSched.

Reviewers: eliben, majnemer

Subscribers: mcrosier, llvm-commits

Differential Revision: http://reviews.llvm.org/D18903

llvm-svn: 266338
This commit is contained in:
parent
c0b7282ebc
commit
77c93881e7
@ -23,10 +23,11 @@
|
||||
#include "llvm/Support/ErrorHandling.h"
|
||||
#include "llvm/Support/TargetRegistry.h"
|
||||
|
||||
using namespace llvm;
|
||||
|
||||
#define GET_INSTRINFO_CTOR_DTOR
|
||||
#include "LanaiGenInstrInfo.inc"
|
||||
|
||||
namespace llvm {
|
||||
// Construct the Lanai instruction-info object, handing the call-frame
// setup/teardown pseudo opcodes to the TableGen-generated base class.
// RegisterInfo is default-constructed (omitting it from the init list is
// equivalent to the explicit RegisterInfo()).
LanaiInstrInfo::LanaiInstrInfo()
    : LanaiGenInstrInfo(Lanai::ADJCALLSTACKDOWN, Lanai::ADJCALLSTACKUP) {}
|
||||
@ -84,6 +85,38 @@ void LanaiInstrInfo::loadRegFromStackSlot(
|
||||
.addImm(LPAC::ADD);
|
||||
}
|
||||
|
||||
// Return true when MIa and MIb provably never access overlapping memory,
// letting MachineScheduler drop the ordering edge between them.
//
// The proof is purely syntactic: both instructions must be plain
// loads/stores (no unmodeled side effects, no ordered/volatile/atomic
// references) whose address decomposes into a base register plus an
// immediate offset. If the base registers match and the byte ranges
// [Offset, Offset + Width) do not overlap, the accesses are disjoint.
bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                     MachineInstr *MIb,
                                                     AliasAnalysis *AA) const {
  assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");

  // Instructions with unknown side effects or ordered memory semantics
  // must keep their relative order; we cannot prove anything about them.
  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
      MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width.
  // Width is the size of memory that is being loaded/stored (e.g. 1, 2, 4).
  // If the base registers are identical, and the lower access's offset plus
  // its width does not reach the higher access's offset, then the memory
  // accesses are different.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned WidthA = 0, WidthB = 0;
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      // Keep the arithmetic in 64 bits: the offsets are int64_t, and the
      // previous narrowing to int could truncate/wrap a large offset and
      // wrongly report two overlapping accesses as disjoint.
      int64_t LowOffset = std::min(OffsetA, OffsetB);
      int64_t HighOffset = std::max(OffsetA, OffsetB);
      int64_t LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
|
||||
|
||||
// Lanai defines no post-register-allocation pseudo instructions, so there
// is never anything to expand; report that MI was left untouched.
bool LanaiInstrInfo::expandPostRAPseudo(
    MachineBasicBlock::iterator /*MI*/) const {
  return false;
}
|
||||
@ -321,4 +354,61 @@ unsigned LanaiInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
} // namespace llvm
|
||||
|
||||
// Decompose a memory instruction into (base register, immediate offset,
// access width in bytes). Returns false for anything that is not a simple
// "base reg + immediate, ALU op == ADD" load or store.
bool LanaiInstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  // Handle only loads/stores with base register followed by immediate
  // offset and with add as ALU op.
  if (LdSt->getNumOperands() != 4)
    return false;

  const MachineOperand &BaseOp = LdSt->getOperand(1);
  const MachineOperand &OffsetOp = LdSt->getOperand(2);
  const MachineOperand &AluOp = LdSt->getOperand(3);
  if (!BaseOp.isReg() || !OffsetOp.isImm())
    return false;
  if (!AluOp.isImm() || AluOp.getImm() != LPAC::ADD)
    return false;

  // Width is determined solely by the opcode.
  switch (LdSt->getOpcode()) {
  case Lanai::LDW_RI:
  case Lanai::LDW_RR:
  case Lanai::SW_RR:
  case Lanai::SW_RI:
    Width = 4;
    break;
  case Lanai::LDHs_RI:
  case Lanai::LDHz_RI:
  case Lanai::STH_RI:
    Width = 2;
    break;
  case Lanai::LDBs_RI:
  case Lanai::LDBz_RI:
  case Lanai::STB_RI:
    Width = 1;
    break;
  default:
    return false;
  }

  BaseReg = BaseOp.getReg();
  Offset = OffsetOp.getImm();
  return true;
}
|
||||
|
||||
// Width-less variant of the decomposition: for the opcodes we understand,
// delegate to getMemOpBaseRegImmOfsWidth and discard the width.
bool LanaiInstrInfo::getMemOpBaseRegImmOfs(
    MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset,
    const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  case Lanai::LDW_RI:
  case Lanai::LDW_RR:
  case Lanai::SW_RR:
  case Lanai::SW_RI:
  case Lanai::LDHs_RI:
  case Lanai::LDHz_RI:
  case Lanai::STH_RI:
  case Lanai::LDBs_RI:
  case Lanai::LDBz_RI: {
    unsigned Width;
    return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
  }
  default:
    return false;
  }
}
|
||||
|
@ -35,6 +35,9 @@ public:
|
||||
return RegisterInfo;
|
||||
}
|
||||
|
||||
bool areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
|
||||
AliasAnalysis *AA) const override;
|
||||
|
||||
unsigned isLoadFromStackSlot(const MachineInstr *MI,
|
||||
int &FrameIndex) const override;
|
||||
|
||||
@ -64,6 +67,14 @@ public:
|
||||
|
||||
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
|
||||
|
||||
bool getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
|
||||
int64_t &Offset,
|
||||
const TargetRegisterInfo *TRI) const override;
|
||||
|
||||
bool getMemOpBaseRegImmOfsWidth(MachineInstr *LdSt, unsigned &BaseReg,
|
||||
int64_t &Offset, unsigned &Width,
|
||||
const TargetRegisterInfo *TRI) const;
|
||||
|
||||
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TrueBlock,
|
||||
MachineBasicBlock *&FalseBlock,
|
||||
SmallVectorImpl<MachineOperand> &Condition,
|
||||
|
55
test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
Normal file
55
test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
Normal file
@ -0,0 +1,55 @@
|
||||
; RUN: llc %s -mtriple=lanai-unknown-unknown -debug-only=misched -o /dev/null 2>&1 | FileCheck %s
|
||||
|
||||
; Make sure there are no control dependencies between memory operations that
|
||||
; are trivially disjoint.
|
||||
|
||||
; Function Attrs: norecurse nounwind uwtable
|
||||
; foo stores constants at byte offsets 0 (i32), 4 (i32), 10 (i16), 12 (i16),
; 14/15/16 (i8) from the same base %x, and loads the i32 at offset 12.
; Only the accesses whose [offset, offset+width) ranges overlap (e.g. the
; i32 load at 12 vs. the i16 store at 12) must keep an ordering edge.
define i32 @foo(i8* inreg nocapture %x) {
entry:
  %0 = bitcast i8* %x to i32*
  store i32 1, i32* %0, align 4                           ; i32 at x+0
  %arrayidx1 = getelementptr inbounds i8, i8* %x, i32 4
  %1 = bitcast i8* %arrayidx1 to i32*
  store i32 2, i32* %1, align 4                           ; i32 at x+4
  %arrayidx2 = getelementptr inbounds i8, i8* %x, i32 12
  %2 = bitcast i8* %arrayidx2 to i32*
  %3 = load i32, i32* %2, align 4                         ; i32 load at x+12
  %arrayidx3 = getelementptr inbounds i8, i8* %x, i32 10
  %4 = bitcast i8* %arrayidx3 to i16*
  store i16 3, i16* %4, align 2                           ; i16 at x+10
  %5 = bitcast i8* %arrayidx2 to i16*
  store i16 4, i16* %5, align 2                           ; i16 at x+12 (overlaps the load)
  %arrayidx5 = getelementptr inbounds i8, i8* %x, i32 14
  store i8 5, i8* %arrayidx5, align 1                     ; i8 at x+14
  %arrayidx6 = getelementptr inbounds i8, i8* %x, i32 15
  store i8 6, i8* %arrayidx6, align 1                     ; i8 at x+15
  %arrayidx7 = getelementptr inbounds i8, i8* %x, i32 16
  store i8 7, i8* %arrayidx7, align 1                     ; i8 at x+16
  ret i32 %3
}
|
||||
|
||||
; CHECK-LABEL: foo
|
||||
; CHECK-LABEL: SU({{.*}}): SW_RI{{.*}}, 0,
|
||||
; CHECK: # preds left : 2
|
||||
; CHECK: # succs left : 0
|
||||
; CHECK-LABEL: SU({{.*}}): SW_RI{{.*}}, 4,
|
||||
; CHECK: # preds left : 2
|
||||
; CHECK: # succs left : 0
|
||||
; CHECK-LABEL: SU({{.*}}): %vreg{{.*}}<def> = LDW_RI{{.*}}, 12,
|
||||
; CHECK: # preds left : 1
|
||||
; CHECK: # succs left : 4
|
||||
; CHECK-LABEL: SU({{.*}}): STH_RI{{.*}}, 10,
|
||||
; CHECK: # preds left : 2
|
||||
; CHECK: # succs left : 0
|
||||
; CHECK-LABEL: SU({{.*}}): STH_RI{{.*}}, 12,
|
||||
; CHECK: # preds left : 3
|
||||
; CHECK: # succs left : 0
|
||||
; CHECK-LABEL: SU({{.*}}): STB_RI{{.*}}, 14,
|
||||
; CHECK: # preds left : 3
|
||||
; CHECK: # succs left : 0
|
||||
; CHECK-LABEL: SU({{.*}}): STB_RI{{.*}}, 15,
|
||||
; CHECK: # preds left : 3
|
||||
; CHECK: # succs left : 0
|
||||
; CHECK-LABEL: SU({{.*}}): STB_RI{{.*}}, 16,
|
||||
; CHECK: # preds left : 2
|
||||
; CHECK: # succs left : 0
|
Loading…
Reference in New Issue
Block a user