
Revert "[InstCombine] try to narrow a truncated load"

This reverts commit bc4a63fd3c29c1a8ce22891bf34ee4dccfef578c. This is a
speculative revert to fix a number of sanitizer bots (like
sanitizer-x86_64-linux-bootstrap-ubsan) that have started to see stage2
compiler crashes, presumably due to a miscompile.

llvm-svn: 367029
Vlad Tsyrklevich 2019-07-25 15:37:57 +00:00
parent c58973af11
commit 9f0f005f28
2 changed files with 12 additions and 89 deletions
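
For context, the reverted fold replaced a one-use truncated wide load with a
narrow load of the same pointer. A minimal before/after sketch in LLVM IR,
adapted from the regression test below (function names are illustrative):

; Input pattern: a wide load whose only use is a trunc.
define i32 @before(i64* dereferenceable(8) %ptr) {
  %x = load i64, i64* %ptr, align 4
  %r = trunc i64 %x to i32
  ret i32 %r
}

; What the reverted fold produced on little-endian targets:
; trunc (load X) --> load (bitcast X)
define i32 @after(i64* dereferenceable(8) %ptr) {
  %b = bitcast i64* %ptr to i32*
  %r = load i32, i32* %b, align 4
  ret i32 %r
}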


@@ -681,42 +681,6 @@ static Instruction *shrinkInsertElt(CastInst &Trunc,
   return nullptr;
 }
 
-static Instruction *narrowLoad(TruncInst &Trunc,
-                               InstCombiner::BuilderTy &Builder,
-                               const DataLayout &DL) {
-  // Check the layout to ensure we are not creating an unsupported operation.
-  // TODO: Create a GEP to offset the load?
-  if (!DL.isLittleEndian())
-    return nullptr;
-  unsigned NarrowBitWidth = Trunc.getDestTy()->getPrimitiveSizeInBits();
-  if (!DL.isLegalInteger(NarrowBitWidth))
-    return nullptr;
-
-  // Match a truncated load with no other uses.
-  Value *X;
-  if (!match(Trunc.getOperand(0), m_OneUse(m_Load(m_Value(X)))))
-    return nullptr;
-  LoadInst *WideLoad = cast<LoadInst>(Trunc.getOperand(0));
-  if (!WideLoad->isSimple())
-    return nullptr;
-
-  // Don't narrow this load if we would lose information about the
-  // dereferenceable range.
-  bool CanBeNull;
-  uint64_t DerefBits = X->getPointerDereferenceableBytes(DL, CanBeNull) * 8;
-  if (DerefBits < WideLoad->getType()->getPrimitiveSizeInBits())
-    return nullptr;
-
-  // trunc (load X) --> load (bitcast X)
-  PointerType *PtrTy = PointerType::get(Trunc.getDestTy(),
-                                        WideLoad->getPointerAddressSpace());
-  Value *Bitcast = Builder.CreatePointerCast(X, PtrTy);
-  LoadInst *NarrowLoad = new LoadInst(Trunc.getDestTy(), Bitcast);
-  NarrowLoad->setAlignment(WideLoad->getAlignment());
-  copyMetadataForLoad(*NarrowLoad, *WideLoad);
-  return NarrowLoad;
-}
-
 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
   if (Instruction *Result = commonCastTransforms(CI))
     return Result;
@@ -876,9 +840,6 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
   if (Instruction *I = foldVecTruncToExtElt(CI, *this))
     return I;
 
-  if (Instruction *NewLoad = narrowLoad(CI, Builder, DL))
-    return NewLoad;
-
   return nullptr;
 }
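
Why the deleted helper bailed out on big-endian targets: it reuses the
original pointer with no offset, so the fold is only sound when the low bytes
of the wide value sit at the base address (the "TODO: Create a GEP to offset
the load?" comment points at the big-endian alternative). A worked sketch,
with the datalayout line shown only to make the endianness explicit:

target datalayout = "e"  ; 'e' = little-endian; 'E' would be big-endian

; Given bytes 00 01 02 03 04 05 06 07 stored at %p:
;   little-endian: the i64 load yields 0x0706050403020100 and trunc to i32
;                  yields 0x03020100; an i32 load at %p is also 0x03020100,
;                  so the narrow load is equivalent.
;   big-endian:    the i64 load yields 0x0001020304050607 and trunc to i32
;                  yields 0x04050607, but an i32 load at %p is 0x00010203,
;                  so narrowing without an offset would miscompile.
define i32 @narrow_no_offset(i64* %p) {
  %b = bitcast i64* %p to i32*
  %r = load i32, i32* %b
  ret i32 %r
}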


@@ -29,15 +29,10 @@ define i32 @truncload_small_deref(i64* dereferenceable(7) %ptr) {
 
 ; On little-endian, we can narrow the load without an offset.
 define i32 @truncload_deref(i64* dereferenceable(8) %ptr) {
-; LE-LABEL: @truncload_deref(
-; LE-NEXT:    [[TMP1:%.*]] = bitcast i64* [[PTR:%.*]] to i32*
-; LE-NEXT:    [[R:%.*]] = load i32, i32* [[TMP1]], align 4
-; LE-NEXT:    ret i32 [[R]]
-;
-; BE-LABEL: @truncload_deref(
-; BE-NEXT:    [[X:%.*]] = load i64, i64* [[PTR:%.*]], align 4
-; BE-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
-; BE-NEXT:    ret i32 [[R]]
+; CHECK-LABEL: @truncload_deref(
+; CHECK-NEXT:    [[X:%.*]] = load i64, i64* [[PTR:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    ret i32 [[R]]
 ;
   %x = load i64, i64* %ptr
   %r = trunc i64 %x to i32
@@ -47,15 +42,10 @@ define i32 @truncload_deref(i64* dereferenceable(8) %ptr) {
 
 ; Preserve alignment.
 define i16 @truncload_align(i32* dereferenceable(14) %ptr) {
-; LE-LABEL: @truncload_align(
-; LE-NEXT:    [[TMP1:%.*]] = bitcast i32* [[PTR:%.*]] to i16*
-; LE-NEXT:    [[R:%.*]] = load i16, i16* [[TMP1]], align 16
-; LE-NEXT:    ret i16 [[R]]
-;
-; BE-LABEL: @truncload_align(
-; BE-NEXT:    [[X:%.*]] = load i32, i32* [[PTR:%.*]], align 16
-; BE-NEXT:    [[R:%.*]] = trunc i32 [[X]] to i16
-; BE-NEXT:    ret i16 [[R]]
+; CHECK-LABEL: @truncload_align(
+; CHECK-NEXT:    [[X:%.*]] = load i32, i32* [[PTR:%.*]], align 16
+; CHECK-NEXT:    [[R:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    ret i16 [[R]]
 ;
   %x = load i32, i32* %ptr, align 16
   %r = trunc i32 %x to i16
@@ -108,40 +98,12 @@ define i32 @truncload_volatile(i64* dereferenceable(8) %ptr) {
 
 ; Preserve address space.
 define i32 @truncload_address_space(i64 addrspace(1)* dereferenceable(8) %ptr) {
-; LE-LABEL: @truncload_address_space(
-; LE-NEXT:    [[TMP1:%.*]] = bitcast i64 addrspace(1)* [[PTR:%.*]] to i32 addrspace(1)*
-; LE-NEXT:    [[R:%.*]] = load i32, i32 addrspace(1)* [[TMP1]], align 4
-; LE-NEXT:    ret i32 [[R]]
-;
-; BE-LABEL: @truncload_address_space(
-; BE-NEXT:    [[X:%.*]] = load i64, i64 addrspace(1)* [[PTR:%.*]], align 4
-; BE-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
-; BE-NEXT:    ret i32 [[R]]
+; CHECK-LABEL: @truncload_address_space(
+; CHECK-NEXT:    [[X:%.*]] = load i64, i64 addrspace(1)* [[PTR:%.*]], align 4
+; CHECK-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    ret i32 [[R]]
 ;
   %x = load i64, i64 addrspace(1)* %ptr, align 4
   %r = trunc i64 %x to i32
   ret i32 %r
 }
-
-; Most metadata should be transferred to the narrow load.
-; TODO: We lost the range.
-
-define i32 @truncload_metadata(i64* dereferenceable(8) %ptr) {
-; LE-LABEL: @truncload_metadata(
-; LE-NEXT:    [[TMP1:%.*]] = bitcast i64* [[PTR:%.*]] to i32*
-; LE-NEXT:    [[R:%.*]] = load i32, i32* [[TMP1]], align 4, !invariant.load !0, !nontemporal !1
-; LE-NEXT:    ret i32 [[R]]
-;
-; BE-LABEL: @truncload_metadata(
-; BE-NEXT:    [[X:%.*]] = load i64, i64* [[PTR:%.*]], align 4, !range !0, !invariant.load !1, !nontemporal !2
-; BE-NEXT:    [[R:%.*]] = trunc i64 [[X]] to i32
-; BE-NEXT:    ret i32 [[R]]
-;
-  %x = load i64, i64* %ptr, align 4, !invariant.load !0, !nontemporal !1, !range !2
-  %r = trunc i64 %x to i32
-  ret i32 %r
-}
-
-!0 = !{}
-!1 = !{i32 1}
-!2 = !{i64 0, i64 2}
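
A note on the truncload_metadata test that is deleted above: the wide load's
!range metadata has bounds typed to the loaded value (i64), and
copyMetadataForLoad did not re-type them for the narrower result, which is
what the "TODO: We lost the range" comment refers to. A hedged sketch of what
a range-preserving narrow load would look like (metadata numbering is
illustrative):

define i32 @wide(i64* %ptr) {
  ; value known to be in [0, 2), with bounds typed to the i64 result
  %x = load i64, i64* %ptr, align 4, !range !0
  %r = trunc i64 %x to i32
  ret i32 %r
}

define i32 @narrow_keeping_range(i32* %ptr) {
  ; keeping the fact across the type change requires i32-typed bounds
  %r = load i32, i32* %ptr, align 4, !range !1
  ret i32 %r
}

!0 = !{i64 0, i64 2}
!1 = !{i32 0, i32 2}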