
[AMDGPU] NFC refactoring in isel for buffer access intrinsics

Rename getBufferOffsetForMMO to updateBufferMMO and pass in the MMO to
be updated, in preparation for the bug fix in D106284.

Call updateBufferMMO consistently for all buffer intrinsics, even the
ones that use setBufferOffsets to decompose a combined offset
expression.

Add a getIdxEn helper function.

Differential Revision: https://reviews.llvm.org/D106354
Author: Jay Foad
Date:   2021-07-13 17:03:57 +01:00
Commit: fd3020376e (parent b11b07e0b8)

2 changed files with 54 additions and 79 deletions
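
In short, the shape of the change at each buffer-intrinsic lowering site is the
following (a condensed sketch drawn from the diff below, reusing its M and Ops
names; not part of the patch text):

  // Before: the helper returned a combined constant offset (or 0 if unknown),
  // and every call site wrote it into the MachineMemOperand itself.
  M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6]));

  // After: the helper takes the MMO and updates it only when the offsets
  // (and any vindex) are known constants.
  updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);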

llvm/lib/Target/AMDGPU/SIISelLowering.cpp

@@ -6795,28 +6795,24 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   }
 }
-// This function computes an appropriate offset to pass to
-// MachineMemOperand::setOffset() based on the offset inputs to
-// an intrinsic. If any of the offsets are non-contstant or
-// if VIndex is non-zero then this function returns 0. Otherwise,
-// it returns the sum of VOffset, SOffset, and Offset.
-static unsigned getBufferOffsetForMMO(SDValue VOffset,
-                                      SDValue SOffset,
-                                      SDValue Offset,
-                                      SDValue VIndex = SDValue()) {
+/// Update \p MMO based on the offset inputs to an intrinsic. If any of the
+/// offsets are non-constant or if \p VIndex is non-zero then this function does
+/// nothing. Otherwise, it sets the MMO offset to the sum of \p VOffset, \p
+/// SOffset, and \p Offset.
+static void updateBufferMMO(MachineMemOperand *MMO, SDValue VOffset,
+                            SDValue SOffset, SDValue Offset,
+                            SDValue VIndex = SDValue()) {
   if (!isa<ConstantSDNode>(VOffset) || !isa<ConstantSDNode>(SOffset) ||
       !isa<ConstantSDNode>(Offset))
-    return 0;
+    return;
-  if (VIndex) {
-    if (!isa<ConstantSDNode>(VIndex) || !cast<ConstantSDNode>(VIndex)->isNullValue())
-      return 0;
-  }
+  if (VIndex && (!isa<ConstantSDNode>(VIndex) ||
+                 !cast<ConstantSDNode>(VIndex)->isNullValue()))
+    return;
-  return cast<ConstantSDNode>(VOffset)->getSExtValue() +
-         cast<ConstantSDNode>(SOffset)->getSExtValue() +
-         cast<ConstantSDNode>(Offset)->getSExtValue();
+  MMO->setOffset(cast<ConstantSDNode>(VOffset)->getSExtValue() +
+                 cast<ConstantSDNode>(SOffset)->getSExtValue() +
+                 cast<ConstantSDNode>(Offset)->getSExtValue());
 }
 SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op,
@@ -6839,13 +6835,21 @@ SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op,
   };
   auto *M = cast<MemSDNode>(Op);
-  M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6]));
+  updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);
   EVT MemVT = VData.getValueType();
   return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT,
                                  M->getMemOperand());
 }
+// Return a value to use for the idxen operand by examining the vindex operand.
+static unsigned getIdxEn(SDValue VIndex) {
+  if (auto VIndexC = dyn_cast<ConstantSDNode>(VIndex))
+    // No need to set idxen if vindex is known to be zero.
+    return VIndexC->getZExtValue() != 0;
+  return 1;
+}
 SDValue
 SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
                                                 unsigned NewOpcode) const {
@@ -6866,8 +6870,7 @@ SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
   };
   auto *M = cast<MemSDNode>(Op);
-  M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6],
-                                                      Ops[3]));
+  updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
   EVT MemVT = VData.getValueType();
   return DAG.getMemIntrinsicNode(NewOpcode, DL, Op->getVTList(), Ops, MemVT,
@@ -6980,9 +6983,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   case Intrinsic::amdgcn_buffer_load_format: {
     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
-    unsigned IdxEn = 1;
-    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
-      IdxEn = Idx->getZExtValue() != 0;
+    unsigned IdxEn = getIdxEn(Op.getOperand(3));
     SDValue Ops[] = {
       Op.getOperand(0), // Chain
       Op.getOperand(2), // rsrc
@@ -6993,11 +6994,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
     };
-    unsigned Offset = setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
-    // We don't know the offset if vindex is non-zero, so clear it.
-    if (IdxEn)
-      Offset = 0;
+    setBufferOffsets(Op.getOperand(4), DAG, &Ops[3]);
     unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
         AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
@@ -7005,7 +7002,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     EVT VT = Op.getValueType();
     EVT IntVT = VT.changeTypeToInteger();
     auto *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(Offset);
+    updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]);
     EVT LoadVT = Op.getValueType();
     if (LoadVT.getScalarType() == MVT::f16)
@@ -7037,7 +7034,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     };
     auto *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5]));
+    updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5]);
     return lowerIntrinsicLoad(M, IsFormat, DAG, Ops);
   }
   case Intrinsic::amdgcn_struct_buffer_load:
@@ -7057,8 +7054,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     };
     auto *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[3], Ops[4], Ops[5],
-                                                        Ops[2]));
+    updateBufferMMO(M->getMemOperand(), Ops[3], Ops[4], Ops[5], Ops[2]);
     return lowerIntrinsicLoad(cast<MemSDNode>(Op), IsFormat, DAG, Ops);
   }
   case Intrinsic::amdgcn_tbuffer_load: {
@@ -7069,9 +7065,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(8))->getZExtValue();
     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
-    unsigned IdxEn = 1;
-    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(3)))
-      IdxEn = Idx->getZExtValue() != 0;
+    unsigned IdxEn = getIdxEn(Op.getOperand(3));
     SDValue Ops[] = {
       Op.getOperand(0), // Chain
       Op.getOperand(2), // rsrc
@@ -7152,9 +7146,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   case Intrinsic::amdgcn_buffer_atomic_xor:
   case Intrinsic::amdgcn_buffer_atomic_fadd: {
     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
-    unsigned IdxEn = 1;
-    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
-      IdxEn = Idx->getZExtValue() != 0;
+    unsigned IdxEn = getIdxEn(Op.getOperand(4));
     SDValue Ops[] = {
       Op.getOperand(0), // Chain
      Op.getOperand(2), // vdata
@@ -7166,14 +7158,12 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
     };
-    unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
-    // We don't know the offset if vindex is non-zero, so clear it.
-    if (IdxEn)
-      Offset = 0;
+    setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
     EVT VT = Op.getValueType();
     auto *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(Offset);
+    updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
     unsigned Opcode = 0;
     switch (IntrID) {
@@ -7296,9 +7286,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   case Intrinsic::amdgcn_buffer_atomic_cmpswap: {
     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
-    unsigned IdxEn = 1;
-    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(5)))
-      IdxEn = Idx->getZExtValue() != 0;
+    unsigned IdxEn = getIdxEn(Op.getOperand(5));
     SDValue Ops[] = {
       Op.getOperand(0), // Chain
       Op.getOperand(2), // src
@@ -7311,13 +7299,11 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
       DAG.getTargetConstant(Slc << 1, DL, MVT::i32), // cachepolicy
       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
     };
-    unsigned Offset = setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
-    // We don't know the offset if vindex is non-zero, so clear it.
-    if (IdxEn)
-      Offset = 0;
+    setBufferOffsets(Op.getOperand(6), DAG, &Ops[5]);
     EVT VT = Op.getValueType();
     auto *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(Offset);
+    updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]);
     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                    Op->getVTList(), Ops, VT, M->getMemOperand());
@@ -7338,7 +7324,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     };
     EVT VT = Op.getValueType();
     auto *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7]));
+    updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7]);
     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                    Op->getVTList(), Ops, VT, M->getMemOperand());
@@ -7359,8 +7345,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
     };
     EVT VT = Op.getValueType();
     auto *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[5], Ops[6], Ops[7],
-                                                        Ops[4]));
+    updateBufferMMO(M->getMemOperand(), Ops[5], Ops[6], Ops[7], Ops[4]);
     return DAG.getMemIntrinsicNode(AMDGPUISD::BUFFER_ATOMIC_CMPSWAP, DL,
                                    Op->getVTList(), Ops, VT, M->getMemOperand());
@@ -7657,9 +7642,7 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
     unsigned Nfmt = cast<ConstantSDNode>(Op.getOperand(9))->getZExtValue();
     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(10))->getZExtValue();
     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(11))->getZExtValue();
-    unsigned IdxEn = 1;
-    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
-      IdxEn = Idx->getZExtValue() != 0;
+    unsigned IdxEn = getIdxEn(Op.getOperand(4));
     SDValue Ops[] = {
       Chain,
       VData, // vdata
@@ -7737,9 +7720,7 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
       VData = handleD16VData(VData, DAG);
     unsigned Glc = cast<ConstantSDNode>(Op.getOperand(6))->getZExtValue();
     unsigned Slc = cast<ConstantSDNode>(Op.getOperand(7))->getZExtValue();
-    unsigned IdxEn = 1;
-    if (auto Idx = dyn_cast<ConstantSDNode>(Op.getOperand(4)))
-      IdxEn = Idx->getZExtValue() != 0;
+    unsigned IdxEn = getIdxEn(Op.getOperand(4));
     SDValue Ops[] = {
       Chain,
       VData,
@@ -7751,15 +7732,13 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
       DAG.getTargetConstant(Glc | (Slc << 1), DL, MVT::i32), // cachepolicy
       DAG.getTargetConstant(IdxEn, DL, MVT::i1), // idxen
     };
-    unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
-    // We don't know the offset if vindex is non-zero, so clear it.
-    if (IdxEn)
-      Offset = 0;
+    setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
     unsigned Opc = IntrinsicID == Intrinsic::amdgcn_buffer_store ?
                    AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
     MemSDNode *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(Offset);
+    updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
     EVT VDataType = VData.getValueType().getScalarType();
@@ -7806,7 +7785,7 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
         IsFormat ? AMDGPUISD::BUFFER_STORE_FORMAT : AMDGPUISD::BUFFER_STORE;
     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
     MemSDNode *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6]));
+    updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6]);
     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
     if (!IsD16 && !VDataVT.isVector() && EltType.getSizeInBits() < 32)
@@ -7853,8 +7832,7 @@ SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
         AMDGPUISD::BUFFER_STORE : AMDGPUISD::BUFFER_STORE_FORMAT;
     Opc = IsD16 ? AMDGPUISD::BUFFER_STORE_FORMAT_D16 : Opc;
     MemSDNode *M = cast<MemSDNode>(Op);
-    M->getMemOperand()->setOffset(getBufferOffsetForMMO(Ops[4], Ops[5], Ops[6],
-                                                        Ops[3]));
+    updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);
     // Handle BUFFER_STORE_BYTE/SHORT overloaded intrinsics
     EVT VDataType = VData.getValueType().getScalarType();
@@ -7934,7 +7912,7 @@ std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
 // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
 // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
 // pointed to by Offsets.
-unsigned SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
+void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
                                         SelectionDAG &DAG, SDValue *Offsets,
                                         Align Alignment) const {
   SDLoc DL(CombinedOffset);
@@ -7946,7 +7924,7 @@ unsigned SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
       Offsets[0] = DAG.getConstant(0, DL, MVT::i32);
       Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
-      return SOffset + ImmOffset;
+      return;
     }
   }
   if (DAG.isBaseWithConstantOffset(CombinedOffset)) {
@@ -7959,13 +7937,12 @@ unsigned SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
       Offsets[0] = N0;
      Offsets[1] = DAG.getConstant(SOffset, DL, MVT::i32);
       Offsets[2] = DAG.getTargetConstant(ImmOffset, DL, MVT::i32);
-      return 0;
+      return;
     }
   }
   Offsets[0] = CombinedOffset;
   Offsets[1] = DAG.getConstant(0, DL, MVT::i32);
   Offsets[2] = DAG.getTargetConstant(0, DL, MVT::i32);
-  return 0;
 }
 // Handle 8 bit and 16 bit buffer loads

llvm/lib/Target/AMDGPU/SIISelLowering.h

@@ -231,9 +231,7 @@ private:
   // Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
   // three offsets (voffset, soffset and instoffset) into the SDValue[3] array
   // pointed to by Offsets.
-  /// \returns 0 If there is a non-constant offset or if the offset is 0.
-  /// Otherwise returns the constant offset.
-  unsigned setBufferOffsets(SDValue CombinedOffset, SelectionDAG &DAG,
+  void setBufferOffsets(SDValue CombinedOffset, SelectionDAG &DAG,
                         SDValue *Offsets, Align Alignment = Align(4)) const;
   // Handle 8 bit and 16 bit buffer loads
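
For the legacy amdgcn_buffer_* intrinsics that decompose a combined offset, the
per-call-site bookkeeping goes away as well. A condensed sketch of that pattern,
again reusing the operand names from the diff above (illustrative only):

  // Before: the caller had to discard the offset returned by setBufferOffsets
  // whenever idxen was set, then store it into the MMO by hand.
  unsigned Offset = setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
  if (IdxEn)
    Offset = 0;
  M->getMemOperand()->setOffset(Offset);

  // After: setBufferOffsets only splits the combined offset into voffset,
  // soffset and instoffset; updateBufferMMO inspects the vindex operand
  // (Ops[3]) itself and leaves the MMO untouched if it might be non-zero.
  setBufferOffsets(Op.getOperand(5), DAG, &Ops[4]);
  updateBufferMMO(M->getMemOperand(), Ops[4], Ops[5], Ops[6], Ops[3]);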