
[StackProtector] Catch direct out-of-bounds when checking address-takenness

With -fstack-protector-strong we check if a non-array variable has its address
taken in a way that could cause a potential out-of-bounds access. However, what
we don't catch is when the address is directly used to create an out-of-bounds
memory access.
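
For example (an illustrative IR sketch in the style of the tests added below, with a made-up function name, not code taken from this patch), the address of a scalar alloca can feed a constant GEP that indexes one element past the allocation; the old check saw no address-takenness here even though the load reads outside the object:

  ; illustrative example, mirrors the constant_out_of_bounds test below
  define i32 @oob_read() sspstrong {
    %var = alloca i32, align 4                            ; 4-byte stack object
    store i32 0, i32* %var, align 4
    %gep = getelementptr inbounds i32, i32* %var, i32 1   ; one element past the end
    %ret = load i32, i32* %gep, align 4                   ; out-of-bounds load
    ret i32 %ret
  }

The new test file below exercises exactly this pattern (constant_out_of_bounds).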

Fix this by examining the offsets of GEPs that are ultimately derived from
allocas and checking if the resulting address is out-of-bounds, and by checking
that any memory operations using such addresses are not over-large.
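
The size check on memory operations matters because an access can be over-large even without any GEP. As a rough sketch (again illustrative, with a made-up function name), loading 8 bytes through a bitcast of a 4-byte alloca is out-of-bounds purely because of the access size, so the new check requests a protector:

  ; illustrative example, mirrors the bitcast_larger_load test below
  define i64 @oversized_read() sspstrong {
    %var = alloca i32, align 4              ; only 4 bytes allocated
    store i32 0, i32* %var, align 4
    %cast = bitcast i32* %var to i64*
    %ret = load i64, i64* %cast, align 4    ; 8-byte load exceeds the allocation
    ret i64 %ret
  }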

Fixes PR43478.

Differential revision: https://reviews.llvm.org/D75695
Author: John Brawn, 2020-03-05 17:18:47 +00:00
parent c586fc306f
commit 6d967b51bd
3 changed files with 448 additions and 6 deletions

include/llvm/CodeGen/StackProtector.h

@@ -95,7 +95,7 @@ private:
                                 bool InStruct = false) const;
   /// Check whether a stack allocation has its address taken.
-  bool HasAddressTaken(const Instruction *AI);
+  bool HasAddressTaken(const Instruction *AI, uint64_t AllocSize);
   /// RequiresStackProtector - Check whether or not this function needs a
   /// stack protector based upon the stack protector level.

lib/CodeGen/StackProtector.cpp

@@ -18,6 +18,7 @@
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
 #include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/TargetLowering.h"
@@ -161,9 +162,16 @@ bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
   return NeedsProtector;
 }
-bool StackProtector::HasAddressTaken(const Instruction *AI) {
+bool StackProtector::HasAddressTaken(const Instruction *AI,
+                                     uint64_t AllocSize) {
+  const DataLayout &DL = M->getDataLayout();
   for (const User *U : AI->users()) {
     const auto *I = cast<Instruction>(U);
+    // If this instruction accesses memory make sure it doesn't access beyond
+    // the bounds of the allocated object.
+    Optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
+    if (MemLoc.hasValue() && MemLoc->Size.getValue() > AllocSize)
+      return true;
     switch (I->getOpcode()) {
     case Instruction::Store:
       if (AI == cast<StoreInst>(I)->getValueOperand())
@@ -189,11 +197,26 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) {
     }
     case Instruction::Invoke:
       return true;
+    case Instruction::GetElementPtr: {
+      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
+      // assumed to be potentially out-of-bounds, then any memory access that
+      // would use it could also be out-of-bounds meaning stack protection is
+      // required.
+      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
+      unsigned TypeSize = DL.getIndexTypeSizeInBits(I->getType());
+      APInt Offset(TypeSize, 0);
+      APInt MaxOffset(TypeSize, AllocSize);
+      if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.ugt(MaxOffset))
+        return true;
+      // Adjust AllocSize to be the space remaining after this offset.
+      if (HasAddressTaken(I, AllocSize - Offset.getLimitedValue()))
+        return true;
+      break;
+    }
     case Instruction::BitCast:
-    case Instruction::GetElementPtr:
     case Instruction::Select:
     case Instruction::AddrSpaceCast:
-      if (HasAddressTaken(I))
+      if (HasAddressTaken(I, AllocSize))
         return true;
       break;
     case Instruction::PHI: {
@@ -201,7 +224,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) {
       // they are only visited once.
       const auto *PN = cast<PHINode>(I);
       if (VisitedPHIs.insert(PN).second)
-        if (HasAddressTaken(PN))
+        if (HasAddressTaken(PN, AllocSize))
           return true;
       break;
     }
@@ -330,7 +353,8 @@ bool StackProtector::RequiresStackProtector() {
         continue;
       }
-      if (Strong && HasAddressTaken(AI)) {
+      if (Strong && HasAddressTaken(AI, M->getDataLayout().getTypeAllocSize(
+                                             AI->getAllocatedType()))) {
         ++NumAddrTaken;
         Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
         ORE.emit([&]() {
@@ -342,6 +366,9 @@ bool StackProtector::RequiresStackProtector() {
        });
        NeedsProtector = true;
      }
+     // Clear any PHIs that we visited, to make sure we examine all uses of
+     // any subsequent allocas that we look at.
+     VisitedPHIs.clear();
    }
  }
 }

test/CodeGen/X86 (new stack-protector out-of-bounds test)

@@ -0,0 +1,415 @@
; RUN: llc -mtriple=i686 -O0 < %s | FileCheck %s
; RUN: llc -mtriple=x86_64 -O0 < %s | FileCheck %s
; CHECK-LABEL: in_bounds:
; CHECK-NOT: __stack_chk_guard
define i32 @in_bounds() #0 {
%var = alloca i32, align 4
store i32 0, i32* %var, align 4
%gep = getelementptr inbounds i32, i32* %var, i32 0
%ret = load i32, i32* %gep, align 4
ret i32 %ret
}
; CHECK-LABEL: constant_out_of_bounds:
; CHECK: __stack_chk_guard
define i32 @constant_out_of_bounds() #0 {
%var = alloca i32, align 4
store i32 0, i32* %var, align 4
%gep = getelementptr inbounds i32, i32* %var, i32 1
%ret = load i32, i32* %gep, align 4
ret i32 %ret
}
; CHECK-LABEL: nonconstant_out_of_bounds:
; CHECK: __stack_chk_guard
define i32 @nonconstant_out_of_bounds(i32 %n) #0 {
%var = alloca i32, align 4
store i32 0, i32* %var, align 4
%gep = getelementptr inbounds i32, i32* %var, i32 %n
%ret = load i32, i32* %gep, align 4
ret i32 %ret
}
; CHECK-LABEL: phi_before_gep_in_bounds:
; CHECK-NOT: __stack_chk_guard
define i32 @phi_before_gep_in_bounds(i32 %k) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
store i32 0, i32* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
if:
br label %then
then:
%ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
%gep = getelementptr inbounds i32, i32* %ptr, i32 0
%ret = load i32, i32* %gep, align 4
ret i32 %ret
}
; CHECK-LABEL: phi_before_gep_constant_out_of_bounds:
; CHECK: __stack_chk_guard
define i32 @phi_before_gep_constant_out_of_bounds(i32 %k) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
store i32 0, i32* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
if:
br label %then
then:
%ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
%gep = getelementptr inbounds i32, i32* %ptr, i32 1
%ret = load i32, i32* %gep, align 4
ret i32 %ret
}
; CHECK-LABEL: phi_before_gep_nonconstant_out_of_bounds:
; CHECK: __stack_chk_guard
define i32 @phi_before_gep_nonconstant_out_of_bounds(i32 %k, i32 %n) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
store i32 0, i32* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
if:
br label %then
then:
%ptr = phi i32* [ %var1, %entry ], [ %var2, %if ]
%gep = getelementptr inbounds i32, i32* %ptr, i32 %n
%ret = load i32, i32* %gep, align 4
ret i32 %ret
}
; CHECK-LABEL: phi_after_gep_in_bounds:
; CHECK-NOT: __stack_chk_guard
define i32 @phi_after_gep_in_bounds(i32 %k) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
store i32 0, i32* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
%gep1 = getelementptr inbounds i32, i32* %var1, i32 0
br label %then
else:
%gep2 = getelementptr inbounds i32, i32* %var2, i32 0
br label %then
then:
%ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
%ret = load i32, i32* %ptr, align 4
ret i32 %ret
}
; CHECK-LABEL: phi_after_gep_constant_out_of_bounds_a:
; CHECK: __stack_chk_guard
define i32 @phi_after_gep_constant_out_of_bounds_a(i32 %k) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
store i32 0, i32* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
%gep1 = getelementptr inbounds i32, i32* %var1, i32 0
br label %then
else:
%gep2 = getelementptr inbounds i32, i32* %var2, i32 1
br label %then
then:
%ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
%ret = load i32, i32* %ptr, align 4
ret i32 %ret
}
; CHECK-LABEL: phi_after_gep_constant_out_of_bounds_b:
; CHECK: __stack_chk_guard
define i32 @phi_after_gep_constant_out_of_bounds_b(i32 %k) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
store i32 0, i32* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
%gep1 = getelementptr inbounds i32, i32* %var1, i32 1
br label %then
else:
%gep2 = getelementptr inbounds i32, i32* %var2, i32 0
br label %then
then:
%ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
%ret = load i32, i32* %ptr, align 4
ret i32 %ret
}
; CHECK-LABEL: phi_different_types_a:
; CHECK: __stack_chk_guard
define i64 @phi_different_types_a(i32 %k) #0 {
entry:
%var1 = alloca i64, align 4
%var2 = alloca i32, align 4
store i64 0, i64* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
if:
%bitcast = bitcast i32* %var2 to i64*
br label %then
then:
%ptr = phi i64* [ %var1, %entry ], [ %bitcast, %if ]
%ret = load i64, i64* %ptr, align 4
ret i64 %ret
}
; CHECK-LABEL: phi_different_types_b:
; CHECK: __stack_chk_guard
define i64 @phi_different_types_b(i32 %k) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i64, align 4
store i32 0, i32* %var1, align 4
store i64 0, i64* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %then
if:
%bitcast = bitcast i32* %var1 to i64*
br label %then
then:
%ptr = phi i64* [ %var2, %entry ], [ %bitcast, %if ]
%ret = load i64, i64* %ptr, align 4
ret i64 %ret
}
; CHECK-LABEL: phi_after_gep_nonconstant_out_of_bounds_a:
; CHECK: __stack_chk_guard
define i32 @phi_after_gep_nonconstant_out_of_bounds_a(i32 %k, i32 %n) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
store i32 0, i32* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
%gep1 = getelementptr inbounds i32, i32* %var1, i32 0
br label %then
else:
%gep2 = getelementptr inbounds i32, i32* %var2, i32 %n
br label %then
then:
%ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
%ret = load i32, i32* %ptr, align 4
ret i32 %ret
}
; CHECK-LABEL: phi_after_gep_nonconstant_out_of_bounds_b:
; CHECK: __stack_chk_guard
define i32 @phi_after_gep_nonconstant_out_of_bounds_b(i32 %k, i32 %n) #0 {
entry:
%var1 = alloca i32, align 4
%var2 = alloca i32, align 4
store i32 0, i32* %var1, align 4
store i32 0, i32* %var2, align 4
%cmp = icmp ne i32 %k, 0
br i1 %cmp, label %if, label %else
if:
%gep1 = getelementptr inbounds i32, i32* %var1, i32 %n
br label %then
else:
%gep2 = getelementptr inbounds i32, i32* %var2, i32 0
br label %then
then:
%ptr = phi i32* [ %gep1, %if ], [ %gep2, %else ]
%ret = load i32, i32* %ptr, align 4
ret i32 %ret
}
%struct.outer = type { %struct.inner, %struct.inner }
%struct.inner = type { i32, i32 }
; CHECK-LABEL: struct_in_bounds:
; CHECK-NOT: __stack_chk_guard
define void @struct_in_bounds() #0 {
%var = alloca %struct.outer, align 4
%outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 1
%innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 1
store i32 0, i32* %innergep, align 4
ret void
}
; CHECK-LABEL: struct_constant_out_of_bounds_a:
; CHECK: __stack_chk_guard
define void @struct_constant_out_of_bounds_a() #0 {
%var = alloca %struct.outer, align 4
%outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 1, i32 0
%innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 0
store i32 0, i32* %innergep, align 4
ret void
}
; CHECK-LABEL: struct_constant_out_of_bounds_b:
; Here the offset is out-of-bounds of the addressed struct.inner member, but
; still within bounds of the outer struct so no stack guard is needed.
; CHECK-NOT: __stack_chk_guard
define void @struct_constant_out_of_bounds_b() #0 {
%var = alloca %struct.outer, align 4
%outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 0
%innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 1, i32 0
store i32 0, i32* %innergep, align 4
ret void
}
; CHECK-LABEL: struct_constant_out_of_bounds_c:
; Here we are out-of-bounds of both the inner and outer struct.
; CHECK: __stack_chk_guard
define void @struct_constant_out_of_bounds_c() #0 {
%var = alloca %struct.outer, align 4
%outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 1
%innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 1, i32 0
store i32 0, i32* %innergep, align 4
ret void
}
; CHECK-LABEL: struct_nonconstant_out_of_bounds_a:
; CHECK: __stack_chk_guard
define void @struct_nonconstant_out_of_bounds_a(i32 %n) #0 {
%var = alloca %struct.outer, align 4
%outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 %n, i32 0
%innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 0, i32 0
store i32 0, i32* %innergep, align 4
ret void
}
; CHECK-LABEL: struct_nonconstant_out_of_bounds_b:
; CHECK: __stack_chk_guard
define void @struct_nonconstant_out_of_bounds_b(i32 %n) #0 {
%var = alloca %struct.outer, align 4
%outergep = getelementptr inbounds %struct.outer, %struct.outer* %var, i32 0, i32 0
%innergep = getelementptr inbounds %struct.inner, %struct.inner* %outergep, i32 %n, i32 0
store i32 0, i32* %innergep, align 4
ret void
}
; CHECK-LABEL: bitcast_smaller_load
; CHECK-NOT: __stack_chk_guard
define i32 @bitcast_smaller_load() #0 {
%var = alloca i64, align 4
store i64 0, i64* %var, align 4
%bitcast = bitcast i64* %var to i32*
%ret = load i32, i32* %bitcast, align 4
ret i32 %ret
}
; CHECK-LABEL: bitcast_same_size_load
; CHECK-NOT: __stack_chk_guard
define i32 @bitcast_same_size_load() #0 {
%var = alloca i64, align 4
store i64 0, i64* %var, align 4
%bitcast = bitcast i64* %var to %struct.inner*
%gep = getelementptr inbounds %struct.inner, %struct.inner* %bitcast, i32 0, i32 1
%ret = load i32, i32* %gep, align 4
ret i32 %ret
}
; CHECK-LABEL: bitcast_larger_load
; CHECK: __stack_chk_guard
define i64 @bitcast_larger_load() #0 {
%var = alloca i32, align 4
store i32 0, i32* %var, align 4
%bitcast = bitcast i32* %var to i64*
%ret = load i64, i64* %bitcast, align 4
ret i64 %ret
}
; CHECK-LABEL: bitcast_larger_store
; CHECK: __stack_chk_guard
define i32 @bitcast_larger_store() #0 {
%var = alloca i32, align 4
%bitcast = bitcast i32* %var to i64*
store i64 0, i64* %bitcast, align 4
%ret = load i32, i32* %var, align 4
ret i32 %ret
}
; CHECK-LABEL: bitcast_larger_cmpxchg
; CHECK: __stack_chk_guard
define i64 @bitcast_larger_cmpxchg(i64 %desired, i64 %new) #0 {
%var = alloca i32, align 4
%bitcast = bitcast i32* %var to i64*
%pair = cmpxchg i64* %bitcast, i64 %desired, i64 %new seq_cst monotonic
%ret = extractvalue { i64, i1 } %pair, 0
ret i64 %ret
}
; CHECK-LABEL: bitcast_larger_atomic_rmw
; CHECK: __stack_chk_guard
define i64 @bitcast_larger_atomic_rmw() #0 {
%var = alloca i32, align 4
%bitcast = bitcast i32* %var to i64*
%ret = atomicrmw add i64* %bitcast, i64 1 monotonic
ret i64 %ret
}
%struct.packed = type <{ i16, i32 }>
; CHECK-LABEL: bitcast_overlap
; CHECK: __stack_chk_guard
define i32 @bitcast_overlap() #0 {
%var = alloca i32, align 4
%bitcast = bitcast i32* %var to %struct.packed*
%gep = getelementptr inbounds %struct.packed, %struct.packed* %bitcast, i32 0, i32 1
%ret = load i32, i32* %gep, align 2
ret i32 %ret
}
%struct.multi_dimensional = type { [10 x [10 x i32]], i32 }
; CHECK-LABEL: multi_dimensional_array
; CHECK: __stack_chk_guard
define i32 @multi_dimensional_array() #0 {
%var = alloca %struct.multi_dimensional, align 4
%gep1 = getelementptr inbounds %struct.multi_dimensional, %struct.multi_dimensional* %var, i32 0, i32 0
%gep2 = getelementptr inbounds [10 x [10 x i32]], [10 x [10 x i32]]* %gep1, i32 0, i32 10
%gep3 = getelementptr inbounds [10 x i32], [10 x i32]* %gep2, i32 0, i32 5
%ret = load i32, i32* %gep3, align 4
ret i32 %ret
}
attributes #0 = { sspstrong }