llvm-mirror/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
Nick Desaulniers 5d27c8ae50 [Inline] prevent inlining on stack protector mismatch
It's common for code that manipulates the stack via inline assembly or
that has to set up its own stack canary (such as the Linux kernel) to
want to avoid stack protectors in certain functions. We've been bitten
by numerous bugs where a callee with a stack protector is inlined into
an __attribute__((no_stack_protector)) caller, which generally breaks
the caller's assumption that it has no stack protector. LTO exacerbates
the issue.
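
As a rough IR-level sketch of the pattern (function names are made up
for illustration): the caller, built with
__attribute__((no_stack_protector)), carries no ssp* attribute, while
the callee does, so inlining the callee would plant a canary in a
function that was explicitly asked not to have one.

  ; callee built with -fstack-protector-strong
  define void @callee() sspstrong {
    %buf = alloca [64 x i8], align 1   ; buffer that wants a canary
    ret void
  }

  ; caller annotated __attribute__((no_stack_protector)): no ssp* attribute
  define void @caller() {
    call void @callee()                ; with this change: not inlined
    ret void
  }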

While developers can avoid this by putting all no_stack_protector
functions together in one translation unit and compiling it with
-fno-stack-protector, that is not as ergonomic as a function attribute
and still doesn't work for LTO. See also:
https://lore.kernel.org/linux-pm/20200915172658.1432732-1-rkir@google.com/
https://lore.kernel.org/lkml/20200918201436.2932360-30-samitolvanen@google.com/T/#u

SSP attributes can be ordered by strength. Weakest to strongest, they
are: ssp, sspstrong, sspreq. Callees with differing SSP attributes may
be inlined into each other, and the strongest attribute is applied to
the caller. (This behavior is unchanged.)
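
For example (a sketch, not taken from the patch itself): if an ssp
caller inlines an sspreq callee, the caller ends up sspreq.

  define void @req_callee() sspreq {
    ret void
  }

  define i32 @ssp_caller() ssp {
    call void @req_callee()   ; may be inlined; caller is then upgraded to sspreq
    ret i32 0
  }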

After this change:
* A callee with no SSP attributes will no longer be inlined into a
  caller with SSP attributes.
* The reverse is also true: a callee with an SSP attribute will not be
  inlined into a caller with no SSP attributes.
* The alwaysinline attribute overrides these rules.
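
A sketch of the exception (illustrative names): an alwaysinline callee
is still inlined even though its SSP attributes do not match the
caller's. The result can be inspected with, e.g., opt -passes=inline -S.

  ; no SSP attribute, but alwaysinline wins
  define void @helper() alwaysinline nounwind {
    ret void
  }

  define void @protected_caller() sspstrong {
    call void @helper()   ; still inlined despite the SSP mismatch
    ret void
  }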

As a result, functions synthesized by the compiler may no longer be
inlined if they are not created with the same stack protector function
attribute as their callers.

Alternative approach to https://reviews.llvm.org/D87956.

Fixes pr/47479.

Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>

Reviewed By: rnk, MaskRay

Differential Revision: https://reviews.llvm.org/D91816
2020-12-02 11:00:16 -08:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-apple-ios -relocation-model=pic -frame-pointer=all | FileCheck %s

@__stack_chk_guard = external global i64*

; PR20558

define i32 @test_stack_guard_remat2() ssp {
; CHECK-LABEL: test_stack_guard_remat2:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #64 ; =64
; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
; CHECK-NEXT: add x29, sp, #48 ; =48
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: Lloh0:
; CHECK-NEXT: adrp x8, ___stack_chk_guard@GOTPAGE
; CHECK-NEXT: Lloh1:
; Load the stack guard for the second time, just in case the previous value gets spilled.
; CHECK-NEXT: adrp x9, ___stack_chk_guard@GOTPAGE
; CHECK-NEXT: Lloh2:
; CHECK-NEXT: ldr x8, [x8, ___stack_chk_guard@GOTPAGEOFF]
; CHECK-NEXT: Lloh3:
; CHECK-NEXT: ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
; CHECK-NEXT: Lloh4:
; CHECK-NEXT: ldr x8, [x8]
; CHECK-NEXT: Lloh5:
; CHECK-NEXT: ldr x9, [x9]
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: stur x9, [x29, #-8]
; CHECK-NEXT: Lloh6:
; CHECK-NEXT: adrp x9, ___stack_chk_guard@GOTPAGE
; CHECK-NEXT: ldur x8, [x29, #-8]
; CHECK-NEXT: Lloh7:
; CHECK-NEXT: ldr x9, [x9, ___stack_chk_guard@GOTPAGEOFF]
; CHECK-NEXT: Lloh8:
; CHECK-NEXT: ldr x9, [x9]
; CHECK-NEXT: cmp x9, x8
; CHECK-NEXT: b.ne LBB0_2
; CHECK-NEXT: ; %bb.1: ; %entry
; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
; CHECK-NEXT: mov w0, #-1
; CHECK-NEXT: add sp, sp, #64 ; =64
; CHECK-NEXT: ret
; CHECK-NEXT: LBB0_2: ; %entry
; CHECK-NEXT: bl ___stack_chk_fail
; CHECK-NEXT: .loh AdrpLdrGotLdr Lloh6, Lloh7, Lloh8
; CHECK-NEXT: .loh AdrpLdrGotLdr Lloh1, Lloh3, Lloh5
; CHECK-NEXT: .loh AdrpLdrGotLdr Lloh0, Lloh2, Lloh4
entry:
  %StackGuardSlot = alloca i8*
  %StackGuard = load i8*, i8** bitcast (i64** @__stack_chk_guard to i8**)
  call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)
  %container = alloca [32 x i8], align 1
  call void @llvm.stackprotectorcheck(i8** bitcast (i64** @__stack_chk_guard to i8**))
  ret i32 -1
}

declare void @llvm.stackprotector(i8*, i8**) ssp
declare void @llvm.stackprotectorcheck(i8**) ssp