mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-22 10:42:39 +01:00
Prevent Constant Folding From Optimizing inrange GEP
This patch does the following things: 1. update SymbolicallyEvaluateGEP so that it bails out if it cannot preserve the inrange attribute; 2. update llvm/test/Analysis/ConstantFolding/gep.ll to remove UB in it; 3. remove inaccurate comment above ConstantFoldInstOperandsImpl in llvm/lib/Analysis/ConstantFolding.cpp; 4. add a new regression test that makes sure that no optimizations change an inrange GEP in an unexpected way. Patch by Zhaomo Yang! Differential Revision: https://reviews.llvm.org/D51698 llvm-svn: 341888
This commit is contained in:
parent
54bc26f33d
commit
b6e7ff238c
@ -960,10 +960,8 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
|
||||
NewIdxs.size() > *LastIRIndex) {
|
||||
InRangeIndex = LastIRIndex;
|
||||
for (unsigned I = 0; I <= *LastIRIndex; ++I)
|
||||
if (NewIdxs[I] != InnermostGEP->getOperand(I + 1)) {
|
||||
InRangeIndex = None;
|
||||
break;
|
||||
}
|
||||
if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Create a GEP.
|
||||
@ -985,11 +983,6 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
|
||||
/// returned, if not, null is returned. Note that this function can fail when
|
||||
/// attempting to fold instructions like loads and stores, which have no
|
||||
/// constant expression form.
|
||||
///
|
||||
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/inrange
|
||||
/// etc information, due to only being passed an opcode and operands. Constant
|
||||
/// folding using this function strips this information.
|
||||
///
|
||||
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
|
||||
ArrayRef<Constant *> Ops,
|
||||
const DataLayout &DL,
|
||||
|
@ -8,23 +8,20 @@ target triple = "x86_64-unknown-linux-gnu"
|
||||
|
||||
@vt = external global [3 x i8*]
|
||||
|
||||
; CHECK: define i32 (...)* @f0()
|
||||
define i32 (...)* @f0() {
|
||||
; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 2) to i32 (...)**)
|
||||
%load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 1) to i32 (...)**), i64 1)
|
||||
ret i32 (...)* %load
|
||||
; CHECK: define i32 (...)** @f0()
|
||||
define i32 (...)** @f0() {
|
||||
; CHECK-NEXT: ret i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 2) to i32 (...)**)
|
||||
ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, inrange i64 0, i64 1) to i32 (...)**), i64 1)
|
||||
}
|
||||
|
||||
; CHECK: define i32 (...)* @f1()
|
||||
define i32 (...)* @f1() {
|
||||
; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, i64 2) to i32 (...)**)
|
||||
%load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 1)
|
||||
ret i32 (...)* %load
|
||||
; CHECK: define i32 (...)** @f1()
|
||||
define i32 (...)** @f1() {
|
||||
; CHECK-NEXT: ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 1)
|
||||
ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 1)
|
||||
}
|
||||
|
||||
; CHECK: define i32 (...)* @f2()
|
||||
define i32 (...)* @f2() {
|
||||
; CHECK-NEXT: load i32 (...)*, i32 (...)** bitcast (i8** getelementptr ([3 x i8*], [3 x i8*]* @vt, i64 1, i64 1) to i32 (...)**)
|
||||
%load = load i32 (...)*, i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 3)
|
||||
ret i32 (...)* %load
|
||||
; CHECK: define i32 (...)** @f2()
|
||||
define i32 (...)** @f2() {
|
||||
; CHECK-NEXT: ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 3)
|
||||
ret i32 (...)** getelementptr (i32 (...)*, i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @vt, i64 0, inrange i64 1) to i32 (...)**), i64 3)
|
||||
}
|
||||
|
18
test/Other/optimize-inrange-gep.ll
Normal file
18
test/Other/optimize-inrange-gep.ll
Normal file
@ -0,0 +1,18 @@
|
||||
; RUN: opt -O0 -S < %s | FileCheck %s
|
||||
; RUN: opt -O1 -S < %s | FileCheck %s
|
||||
; RUN: opt -O2 -S < %s | FileCheck %s
|
||||
; RUN: opt -O3 -S < %s | FileCheck %s
|
||||
; RUN: opt -Os -S < %s | FileCheck %s
|
||||
; RUN: opt -Oz -S < %s | FileCheck %s
|
||||
|
||||
target datalayout = "e-p:64:64"
|
||||
|
||||
; Make sure that optimizations do not optimize inrange GEP.
|
||||
|
||||
@vtable = constant { [3 x i8*] } { [3 x i8*] [i8* null, i8* null, i8* null] }
|
||||
|
||||
define void @foo(i8*** %p) {
|
||||
;CHECK: store i8** getelementptr {{.*}} ({ [3 x i8*] }, { [3 x i8*] }* @vtable, i{{.*}} 0, inrange i32 0, i{{.*}} 3), i8*** %p
|
||||
store i8** getelementptr ({ [3 x i8*] }, { [3 x i8*] }* @vtable, i32 0, inrange i32 0, i32 3), i8*** %p
|
||||
ret void
|
||||
}
|
Loading…
Reference in New Issue
Block a user