1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-11-22 18:54:02 +01:00

[stackprotector] Implement the sspstrong rules for stack layout.

This changes the PrologueEpilogInserter and LocalStackSlotAllocation passes to
follow the extended stack layout rules for sspstrong and sspreq.

The sspstrong layout rules are:
 1. Large arrays and structures containing large arrays (>= ssp-buffer-size)
are closest to the stack protector.
 2. Small arrays and structures containing small arrays (< ssp-buffer-size) are
2nd closest to the protector.
 3. Variables that have had their address taken are 3rd closest to the
protector.


Differential Revision: http://llvm-reviews.chandlerc.com/D2546

llvm-svn: 200601
This commit is contained in:
Josh Magee 2014-02-01 01:36:16 +00:00
parent 239e9806ff
commit d0e03ee88f
5 changed files with 644 additions and 4 deletions

View File

@ -1112,6 +1112,9 @@ example:
- Calls to alloca() with variable sizes or constant sizes greater than
``ssp-buffer-size``.
Variables that are identified as requiring a protector will be arranged
on the stack such that they are adjacent to the stack protector guard.
If a function that has an ``ssp`` attribute is inlined into a
function that doesn't have an ``ssp`` attribute, then the resulting
function will have an ``ssp`` attribute.
@ -1120,6 +1123,17 @@ example:
stack smashing protector. This overrides the ``ssp`` function
attribute.
Variables that are identified as requiring a protector will be arranged
on the stack such that they are adjacent to the stack protector guard.
The specific layout rules are:
#. Large arrays and structures containing large arrays
(``>= ssp-buffer-size``) are closest to the stack protector.
#. Small arrays and structures containing small arrays
(``< ssp-buffer-size``) are 2nd closest to the protector.
#. Variables that have had their address taken are 3rd closest to the
protector.
If a function that has an ``sspreq`` attribute is inlined into a
function that doesn't have an ``sspreq`` attribute or which has an
``ssp`` or ``sspstrong`` attribute, then the resulting function will have
@ -1135,6 +1149,17 @@ example:
- Calls to alloca().
- Local variables that have had their address taken.
Variables that are identified as requiring a protector will be arranged
on the stack such that they are adjacent to the stack protector guard.
The specific layout rules are:
#. Large arrays and structures containing large arrays
(``>= ssp-buffer-size``) are closest to the stack protector.
#. Small arrays and structures containing small arrays
(``< ssp-buffer-size``) are 2nd closest to the protector.
#. Variables that have had their address taken are 3rd closest to the
protector.
This overrides the ``ssp`` function attribute.
If a function that has an ``sspstrong`` attribute is inlined into a

View File

@ -194,6 +194,9 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
StackObjSet SmallArrayObjs;
StackObjSet AddrOfObjs;
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
StackGrowsDown, MaxAlign);
@ -206,8 +209,12 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
continue;
case StackProtector::SSPLK_SmallArray:
SmallArrayObjs.insert(i);
continue;
case StackProtector::SSPLK_AddrOf:
AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
@ -218,6 +225,10 @@ void LocalStackSlotPass::calculateFrameObjectOffsets(MachineFunction &Fn) {
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill

View File

@ -553,6 +553,9 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
SmallSet<int, 16> ProtectedObjs;
if (MFI->getStackProtectorIndex() >= 0) {
StackObjSet LargeArrayObjs;
StackObjSet SmallArrayObjs;
StackObjSet AddrOfObjs;
AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
Offset, MaxAlign);
@ -572,8 +575,12 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
case StackProtector::SSPLK_None:
continue;
case StackProtector::SSPLK_SmallArray:
SmallArrayObjs.insert(i);
continue;
case StackProtector::SSPLK_AddrOf:
AddrOfObjs.insert(i);
continue;
case StackProtector::SSPLK_LargeArray:
LargeArrayObjs.insert(i);
@ -584,6 +591,10 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
Offset, MaxAlign);
}
// Then assign frame offsets to stack objects that are not used to spill

View File

@ -165,6 +165,327 @@ entry:
ret void
}
define void @layout_sspstrong() sspstrong {
entry:
; FileCheck test pinning the sspstrong stack layout on ARM: large arrays
; closest to the stack protector, then small arrays, then address-taken
; locals, then remaining scalars.  The CHECK lines match the store offsets
; assigned by PrologEpilogInserter; do not reorder the IR below.
; Expected stack layout for sspstrong is
; 144 large_nonchar . Group 1, nested arrays,
; 136 large_char . arrays >= ssp-buffer-size
; 128 struct_large_char .
; 96 struct_large_nonchar .
; 84+8 small_nonchar | Group 2, nested arrays,
; 90 small_char | arrays < ssp-buffer-size
; 88 struct_small_char |
; 84 struct_small_nonchar |
; 80 addrof * Group 3, addr-of local
; 76 scalar1 + Group 4, everything else
; 72 scalar2 +
; 68 scalar3 +
;
; CHECK: layout_sspstrong:
; r[[SP]] is used as an offset into the stack later
; CHECK: add r[[SP:[0-9]+]], sp, #84
; CHECK: bl get_scalar1
; CHECK: str r0, [sp, #76]
; CHECK: bl end_scalar1
; CHECK: bl get_scalar2
; CHECK: str r0, [sp, #72]
; CHECK: bl end_scalar2
; CHECK: bl get_scalar3
; CHECK: str r0, [sp, #68]
; CHECK: bl end_scalar3
; CHECK: bl get_addrof
; CHECK: str r0, [sp, #80]
; CHECK: bl end_addrof
; CHECK: get_small_nonchar
; CHECK: strh r0, [r[[SP]], #8]
; CHECK: bl end_small_nonchar
; CHECK: bl get_large_nonchar
; CHECK: str r0, [sp, #144]
; CHECK: bl end_large_nonchar
; CHECK: bl get_small_char
; CHECK: strb r0, [sp, #90]
; CHECK: bl end_small_char
; CHECK: bl get_large_char
; CHECK: strb r0, [sp, #136]
; CHECK: bl end_large_char
; CHECK: bl get_struct_large_char
; CHECK: strb r0, [sp, #128]
; CHECK: bl end_struct_large_char
; CHECK: bl get_struct_small_char
; CHECK: strb r0, [sp, #88]
; CHECK: bl end_struct_small_char
; CHECK: bl get_struct_large_nonchar
; CHECK: str r0, [sp, #96]
; CHECK: bl end_struct_large_nonchar
; CHECK: bl get_struct_small_nonchar
; CHECK: strh r0, [r[[SP]]]
; CHECK: bl end_struct_small_nonchar
; Locals covering every SSPLayoutKind: scalars (SSPLK_None), an
; address-taken scalar (%ptr, SSPLK_AddrOf), small/large arrays, and
; structs wrapping small/large arrays.
  %x = alloca i32, align 4
  %y = alloca i32, align 4
  %z = alloca i32, align 4
  %ptr = alloca i32, align 4
  %small2 = alloca [2 x i16], align 2
  %large2 = alloca [8 x i32], align 16
  %small = alloca [2 x i8], align 1
  %large = alloca [8 x i8], align 1
  %a = alloca %struct.struct_large_char, align 1
  %b = alloca %struct.struct_small_char, align 1
  %c = alloca %struct.struct_large_nonchar, align 8
  %d = alloca %struct.struct_small_nonchar, align 2
  %call = call i32 @get_scalar1()
  store i32 %call, i32* %x, align 4
  call void @end_scalar1()
  %call1 = call i32 @get_scalar2()
  store i32 %call1, i32* %y, align 4
  call void @end_scalar2()
  %call2 = call i32 @get_scalar3()
  store i32 %call2, i32* %z, align 4
  call void @end_scalar3()
  %call3 = call i32 @get_addrof()
  store i32 %call3, i32* %ptr, align 4
  call void @end_addrof()
  %call4 = call signext i16 @get_small_nonchar()
  %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
  store i16 %call4, i16* %arrayidx, align 2
  call void @end_small_nonchar()
  %call5 = call i32 @get_large_nonchar()
  %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
  store i32 %call5, i32* %arrayidx6, align 4
  call void @end_large_nonchar()
  %call7 = call signext i8 @get_small_char()
  %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
  store i8 %call7, i8* %arrayidx8, align 1
  call void @end_small_char()
  %call9 = call signext i8 @get_large_char()
  %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
  store i8 %call9, i8* %arrayidx10, align 1
  call void @end_large_char()
  %call11 = call signext i8 @get_struct_large_char()
  %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
  %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
  store i8 %call11, i8* %arrayidx12, align 1
  call void @end_struct_large_char()
  %call13 = call signext i8 @get_struct_small_char()
  %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
  %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
  store i8 %call13, i8* %arrayidx15, align 1
  call void @end_struct_small_char()
  %call16 = call i32 @get_struct_large_nonchar()
  %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
  %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
  store i32 %call16, i32* %arrayidx18, align 4
  call void @end_struct_large_nonchar()
  %call19 = call signext i16 @get_struct_small_nonchar()
  %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
  %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
  store i16 %call19, i16* %arrayidx21, align 2
  call void @end_struct_small_nonchar()
; Pass every local to @takes_all so none can be optimized away.
  %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
  %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
  %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
  %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
  %0 = load i32* %x, align 4
  %1 = load i32* %y, align 4
  %2 = load i32* %z, align 4
  %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
  %3 = bitcast [8 x i8]* %coerce.dive to i64*
  %4 = load i64* %3, align 1
  %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
  %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
  %6 = load i16* %5, align 1
  %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
  %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
  %8 = load i32* %7, align 1
  call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
  ret void
}
define void @layout_sspreq() sspreq {
entry:
; Identical IR to @layout_sspstrong above, but with the sspreq attribute:
; sspreq must follow the same extended layout rules, so the CHECK offsets
; below mirror the sspstrong test exactly.
; Expected stack layout for sspreq is the same as sspstrong
;
; CHECK: layout_sspreq:
; r[[SP]] is used as an offset into the stack later
; CHECK: add r[[SP:[0-9]+]], sp, #84
; CHECK: bl get_scalar1
; CHECK: str r0, [sp, #76]
; CHECK: bl end_scalar1
; CHECK: bl get_scalar2
; CHECK: str r0, [sp, #72]
; CHECK: bl end_scalar2
; CHECK: bl get_scalar3
; CHECK: str r0, [sp, #68]
; CHECK: bl end_scalar3
; CHECK: bl get_addrof
; CHECK: str r0, [sp, #80]
; CHECK: bl end_addrof
; CHECK: get_small_nonchar
; CHECK: strh r0, [r[[SP]], #8]
; CHECK: bl end_small_nonchar
; CHECK: bl get_large_nonchar
; CHECK: str r0, [sp, #144]
; CHECK: bl end_large_nonchar
; CHECK: bl get_small_char
; CHECK: strb r0, [sp, #90]
; CHECK: bl end_small_char
; CHECK: bl get_large_char
; CHECK: strb r0, [sp, #136]
; CHECK: bl end_large_char
; CHECK: bl get_struct_large_char
; CHECK: strb r0, [sp, #128]
; CHECK: bl end_struct_large_char
; CHECK: bl get_struct_small_char
; CHECK: strb r0, [sp, #88]
; CHECK: bl end_struct_small_char
; CHECK: bl get_struct_large_nonchar
; CHECK: str r0, [sp, #96]
; CHECK: bl end_struct_large_nonchar
; CHECK: bl get_struct_small_nonchar
; CHECK: strh r0, [r[[SP]]]
; CHECK: bl end_struct_small_nonchar
  %x = alloca i32, align 4
  %y = alloca i32, align 4
  %z = alloca i32, align 4
  %ptr = alloca i32, align 4
  %small2 = alloca [2 x i16], align 2
  %large2 = alloca [8 x i32], align 16
  %small = alloca [2 x i8], align 1
  %large = alloca [8 x i8], align 1
  %a = alloca %struct.struct_large_char, align 1
  %b = alloca %struct.struct_small_char, align 1
  %c = alloca %struct.struct_large_nonchar, align 8
  %d = alloca %struct.struct_small_nonchar, align 2
  %call = call i32 @get_scalar1()
  store i32 %call, i32* %x, align 4
  call void @end_scalar1()
  %call1 = call i32 @get_scalar2()
  store i32 %call1, i32* %y, align 4
  call void @end_scalar2()
  %call2 = call i32 @get_scalar3()
  store i32 %call2, i32* %z, align 4
  call void @end_scalar3()
  %call3 = call i32 @get_addrof()
  store i32 %call3, i32* %ptr, align 4
  call void @end_addrof()
  %call4 = call signext i16 @get_small_nonchar()
  %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
  store i16 %call4, i16* %arrayidx, align 2
  call void @end_small_nonchar()
  %call5 = call i32 @get_large_nonchar()
  %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
  store i32 %call5, i32* %arrayidx6, align 4
  call void @end_large_nonchar()
  %call7 = call signext i8 @get_small_char()
  %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
  store i8 %call7, i8* %arrayidx8, align 1
  call void @end_small_char()
  %call9 = call signext i8 @get_large_char()
  %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
  store i8 %call9, i8* %arrayidx10, align 1
  call void @end_large_char()
  %call11 = call signext i8 @get_struct_large_char()
  %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
  %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
  store i8 %call11, i8* %arrayidx12, align 1
  call void @end_struct_large_char()
  %call13 = call signext i8 @get_struct_small_char()
  %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
  %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
  store i8 %call13, i8* %arrayidx15, align 1
  call void @end_struct_small_char()
  %call16 = call i32 @get_struct_large_nonchar()
  %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
  %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
  store i32 %call16, i32* %arrayidx18, align 4
  call void @end_struct_large_nonchar()
  %call19 = call signext i16 @get_struct_small_nonchar()
  %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
  %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
  store i16 %call19, i16* %arrayidx21, align 2
  call void @end_struct_small_nonchar()
; Pass every local to @takes_all so none can be optimized away.
  %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
  %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
  %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
  %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
  %0 = load i32* %x, align 4
  %1 = load i32* %y, align 4
  %2 = load i32* %z, align 4
  %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
  %3 = bitcast [8 x i8]* %coerce.dive to i64*
  %4 = load i64* %3, align 1
  %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
  %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
  %6 = load i16* %5, align 1
  %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
  %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
  %8 = load i32* %7, align 1
  call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
  ret void
}
define void @struct_with_protectable_arrays() sspstrong {
entry:
; Check to ensure that a structure which contains a small array followed by a
; large array is assigned to the stack properly as a large object.
; %b is a %struct.struct_large_char2 ({ [2 x i8], [8 x i8] }): the large
; member must dominate classification, so %b lands in the large-object
; group (offset 106) while %a (small array only) stays in the small group
; (offset 68).
; CHECK: struct_with_protectable_arrays:
; CHECK: bl get_struct_small_char
; CHECK: strb r0, [sp, #68]
; CHECK: bl end_struct_small_char
; CHECK: bl get_struct_large_char2
; CHECK: strb r0, [sp, #106]
; CHECK: bl end_struct_large_char2
  %a = alloca %struct.struct_small_char, align 1
  %b = alloca %struct.struct_large_char2, align 1
  %d1 = alloca %struct.struct_large_nonchar, align 8
  %d2 = alloca %struct.struct_small_nonchar, align 2
  %call = call signext i8 @get_struct_small_char()
  %foo = getelementptr inbounds %struct.struct_small_char* %a, i32 0, i32 0
  %arrayidx = getelementptr inbounds [2 x i8]* %foo, i32 0, i64 0
  store i8 %call, i8* %arrayidx, align 1
  call void @end_struct_small_char()
  %call1 = call signext i8 @get_struct_large_char2()
; Store through the second (large) member of struct_large_char2.
  %foo2 = getelementptr inbounds %struct.struct_large_char2* %b, i32 0, i32 1
  %arrayidx3 = getelementptr inbounds [8 x i8]* %foo2, i32 0, i64 0
  store i8 %call1, i8* %arrayidx3, align 1
  call void @end_struct_large_char2()
  %0 = bitcast %struct.struct_large_char2* %b to %struct.struct_large_char*
  %coerce.dive = getelementptr %struct.struct_large_char* %0, i32 0, i32 0
  %1 = bitcast [8 x i8]* %coerce.dive to i64*
  %2 = load i64* %1, align 1
  %coerce.dive4 = getelementptr %struct.struct_small_char* %a, i32 0, i32 0
  %3 = bitcast [2 x i8]* %coerce.dive4 to i16*
  %4 = load i16* %3, align 1
  %coerce.dive5 = getelementptr %struct.struct_small_nonchar* %d2, i32 0, i32 0
  %5 = bitcast [2 x i16]* %coerce.dive5 to i32*
  %6 = load i32* %5, align 1
  call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* byval align 8 %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0)
  ret void
}
declare i32 @get_scalar1()
declare void @end_scalar1()

View File

@ -21,7 +21,6 @@
; on a non-linux target the data layout rules are triggered.
%struct.struct_large_char = type { [8 x i8] }
%struct.struct_large_char2 = type { [2 x i8], [8 x i8] }
%struct.struct_small_char = type { [2 x i8] }
%struct.struct_large_nonchar = type { [8 x i32] }
%struct.struct_small_nonchar = type { [2 x i16] }
@ -170,6 +169,282 @@ entry:
ret void
}
define void @layout_sspstrong() nounwind uwtable sspstrong {
entry:
; X86 version of the sspstrong layout test: CHECK lines match negative
; frame-pointer offsets, so larger magnitudes are further from the
; protector.  Groups 1-4 are as described below; do not reorder the IR.
; Expected stack layout for sspstrong is
; -48 large_nonchar . Group 1, nested arrays,
; -56 large_char . arrays >= ssp-buffer-size
; -64 struct_large_char .
; -96 struct_large_nonchar .
; -100 small_nonchar | Group 2, nested arrays,
; -102 small_char | arrays < ssp-buffer-size
; -104 struct_small_char |
; -112 struct_small_nonchar |
; -116 addrof * Group 3, addr-of local
; -120 scalar1 + Group 4, everything else
; -124 scalar2 +
; -128 scalar3 +
;
; CHECK: layout_sspstrong:
; CHECK: call{{l|q}} get_scalar1
; CHECK: movl %eax, -120(
; CHECK: call{{l|q}} end_scalar1
; CHECK: call{{l|q}} get_scalar2
; CHECK: movl %eax, -124(
; CHECK: call{{l|q}} end_scalar2
; CHECK: call{{l|q}} get_scalar3
; CHECK: movl %eax, -128(
; CHECK: call{{l|q}} end_scalar3
; CHECK: call{{l|q}} get_addrof
; CHECK: movl %eax, -116(
; CHECK: call{{l|q}} end_addrof
; CHECK: get_small_nonchar
; CHECK: movw %ax, -100(
; CHECK: call{{l|q}} end_small_nonchar
; CHECK: call{{l|q}} get_large_nonchar
; CHECK: movl %eax, -48(
; CHECK: call{{l|q}} end_large_nonchar
; CHECK: call{{l|q}} get_small_char
; CHECK: movb %al, -102(
; CHECK: call{{l|q}} end_small_char
; CHECK: call{{l|q}} get_large_char
; CHECK: movb %al, -56(
; CHECK: call{{l|q}} end_large_char
; CHECK: call{{l|q}} get_struct_large_char
; CHECK: movb %al, -64(
; CHECK: call{{l|q}} end_struct_large_char
; CHECK: call{{l|q}} get_struct_small_char
; CHECK: movb %al, -104(
; CHECK: call{{l|q}} end_struct_small_char
; CHECK: call{{l|q}} get_struct_large_nonchar
; CHECK: movl %eax, -96(
; CHECK: call{{l|q}} end_struct_large_nonchar
; CHECK: call{{l|q}} get_struct_small_nonchar
; CHECK: movw %ax, -112(
; CHECK: call{{l|q}} end_struct_small_nonchar
; Locals covering every SSPLayoutKind: scalars (SSPLK_None), an
; address-taken scalar (%ptr, SSPLK_AddrOf), small/large arrays, and
; structs wrapping small/large arrays.
  %x = alloca i32, align 4
  %y = alloca i32, align 4
  %z = alloca i32, align 4
  %ptr = alloca i32, align 4
  %small2 = alloca [2 x i16], align 2
  %large2 = alloca [8 x i32], align 16
  %small = alloca [2 x i8], align 1
  %large = alloca [8 x i8], align 1
  %a = alloca %struct.struct_large_char, align 1
  %b = alloca %struct.struct_small_char, align 1
  %c = alloca %struct.struct_large_nonchar, align 8
  %d = alloca %struct.struct_small_nonchar, align 2
  %call = call i32 @get_scalar1()
  store i32 %call, i32* %x, align 4
  call void @end_scalar1()
  %call1 = call i32 @get_scalar2()
  store i32 %call1, i32* %y, align 4
  call void @end_scalar2()
  %call2 = call i32 @get_scalar3()
  store i32 %call2, i32* %z, align 4
  call void @end_scalar3()
  %call3 = call i32 @get_addrof()
  store i32 %call3, i32* %ptr, align 4
  call void @end_addrof()
  %call4 = call signext i16 @get_small_nonchar()
  %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
  store i16 %call4, i16* %arrayidx, align 2
  call void @end_small_nonchar()
  %call5 = call i32 @get_large_nonchar()
  %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
  store i32 %call5, i32* %arrayidx6, align 4
  call void @end_large_nonchar()
  %call7 = call signext i8 @get_small_char()
  %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
  store i8 %call7, i8* %arrayidx8, align 1
  call void @end_small_char()
  %call9 = call signext i8 @get_large_char()
  %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
  store i8 %call9, i8* %arrayidx10, align 1
  call void @end_large_char()
  %call11 = call signext i8 @get_struct_large_char()
  %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
  %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
  store i8 %call11, i8* %arrayidx12, align 1
  call void @end_struct_large_char()
  %call13 = call signext i8 @get_struct_small_char()
  %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
  %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
  store i8 %call13, i8* %arrayidx15, align 1
  call void @end_struct_small_char()
  %call16 = call i32 @get_struct_large_nonchar()
  %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
  %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
  store i32 %call16, i32* %arrayidx18, align 4
  call void @end_struct_large_nonchar()
  %call19 = call signext i16 @get_struct_small_nonchar()
  %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
  %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
  store i16 %call19, i16* %arrayidx21, align 2
  call void @end_struct_small_nonchar()
; Pass every local to @takes_all so none can be optimized away.
  %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
  %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
  %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
  %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
  %0 = load i32* %x, align 4
  %1 = load i32* %y, align 4
  %2 = load i32* %z, align 4
  %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
  %3 = bitcast [8 x i8]* %coerce.dive to i64*
  %4 = load i64* %3, align 1
  %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
  %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
  %6 = load i16* %5, align 1
  %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
  %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
  %8 = load i32* %7, align 1
  call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
  ret void
}
define void @layout_sspreq() nounwind uwtable sspreq {
entry:
; Identical IR to the X86 @layout_sspstrong above, but with the sspreq
; attribute: sspreq must follow the same extended layout rules, so the
; CHECK offsets below mirror the sspstrong test exactly.
; Expected stack layout for sspreq is the same as sspstrong
;
; CHECK: layout_sspreq:
; CHECK: call{{l|q}} get_scalar1
; CHECK: movl %eax, -120(
; CHECK: call{{l|q}} end_scalar1
; CHECK: call{{l|q}} get_scalar2
; CHECK: movl %eax, -124(
; CHECK: call{{l|q}} end_scalar2
; CHECK: call{{l|q}} get_scalar3
; CHECK: movl %eax, -128(
; CHECK: call{{l|q}} end_scalar3
; CHECK: call{{l|q}} get_addrof
; CHECK: movl %eax, -116(
; CHECK: call{{l|q}} end_addrof
; CHECK: get_small_nonchar
; CHECK: movw %ax, -100(
; CHECK: call{{l|q}} end_small_nonchar
; CHECK: call{{l|q}} get_large_nonchar
; CHECK: movl %eax, -48(
; CHECK: call{{l|q}} end_large_nonchar
; CHECK: call{{l|q}} get_small_char
; CHECK: movb %al, -102(
; CHECK: call{{l|q}} end_small_char
; CHECK: call{{l|q}} get_large_char
; CHECK: movb %al, -56(
; CHECK: call{{l|q}} end_large_char
; CHECK: call{{l|q}} get_struct_large_char
; CHECK: movb %al, -64(
; CHECK: call{{l|q}} end_struct_large_char
; CHECK: call{{l|q}} get_struct_small_char
; CHECK: movb %al, -104(
; CHECK: call{{l|q}} end_struct_small_char
; CHECK: call{{l|q}} get_struct_large_nonchar
; CHECK: movl %eax, -96(
; CHECK: call{{l|q}} end_struct_large_nonchar
; CHECK: call{{l|q}} get_struct_small_nonchar
; CHECK: movw %ax, -112(
; CHECK: call{{l|q}} end_struct_small_nonchar
  %x = alloca i32, align 4
  %y = alloca i32, align 4
  %z = alloca i32, align 4
  %ptr = alloca i32, align 4
  %small2 = alloca [2 x i16], align 2
  %large2 = alloca [8 x i32], align 16
  %small = alloca [2 x i8], align 1
  %large = alloca [8 x i8], align 1
  %a = alloca %struct.struct_large_char, align 1
  %b = alloca %struct.struct_small_char, align 1
  %c = alloca %struct.struct_large_nonchar, align 8
  %d = alloca %struct.struct_small_nonchar, align 2
  %call = call i32 @get_scalar1()
  store i32 %call, i32* %x, align 4
  call void @end_scalar1()
  %call1 = call i32 @get_scalar2()
  store i32 %call1, i32* %y, align 4
  call void @end_scalar2()
  %call2 = call i32 @get_scalar3()
  store i32 %call2, i32* %z, align 4
  call void @end_scalar3()
  %call3 = call i32 @get_addrof()
  store i32 %call3, i32* %ptr, align 4
  call void @end_addrof()
  %call4 = call signext i16 @get_small_nonchar()
  %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
  store i16 %call4, i16* %arrayidx, align 2
  call void @end_small_nonchar()
  %call5 = call i32 @get_large_nonchar()
  %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
  store i32 %call5, i32* %arrayidx6, align 4
  call void @end_large_nonchar()
  %call7 = call signext i8 @get_small_char()
  %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
  store i8 %call7, i8* %arrayidx8, align 1
  call void @end_small_char()
  %call9 = call signext i8 @get_large_char()
  %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
  store i8 %call9, i8* %arrayidx10, align 1
  call void @end_large_char()
  %call11 = call signext i8 @get_struct_large_char()
  %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
  %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
  store i8 %call11, i8* %arrayidx12, align 1
  call void @end_struct_large_char()
  %call13 = call signext i8 @get_struct_small_char()
  %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
  %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
  store i8 %call13, i8* %arrayidx15, align 1
  call void @end_struct_small_char()
  %call16 = call i32 @get_struct_large_nonchar()
  %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
  %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
  store i32 %call16, i32* %arrayidx18, align 4
  call void @end_struct_large_nonchar()
  %call19 = call signext i16 @get_struct_small_nonchar()
  %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
  %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
  store i16 %call19, i16* %arrayidx21, align 2
  call void @end_struct_small_nonchar()
; Pass every local to @takes_all so none can be optimized away.
  %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
  %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
  %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
  %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
  %0 = load i32* %x, align 4
  %1 = load i32* %y, align 4
  %2 = load i32* %z, align 4
  %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
  %3 = bitcast [8 x i8]* %coerce.dive to i64*
  %4 = load i64* %3, align 1
  %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
  %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
  %6 = load i16* %5, align 1
  %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
  %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
  %8 = load i32* %7, align 1
  call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
  ret void
}
define void @fast_non_linux() ssp {
entry:
; FAST-NON-LIN: fast_non_linux:
@ -222,9 +497,6 @@ declare void @end_large_char()
declare signext i8 @get_struct_large_char()
declare void @end_struct_large_char()
declare signext i8 @get_struct_large_char2()
declare void @end_struct_large_char2()
declare signext i8 @get_struct_small_char()
declare void @end_struct_small_char()