
Control-Flow Enforcement Technology - Shadow Stack support (LLVM side)

The shadow stack solution introduces a second stack that holds return addresses only.
The hardware maintains a Shadow Stack Pointer (SSP) that points to the next return address.
If a return transfers control to a different address, an exception is triggered.
The shadow stack is managed through a set of intrinsics introduced in this patch, together with the new SSP register.
The intrinsics are mapped to the new instructions that implement the CET mechanism.
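
For example, the new intrinsics can be used directly from IR, mirroring the tests added in this patch (an illustrative sketch only; the wrapper function below is not part of the patch):

; Read the current shadow stack pointer and pop one return address (sketch).
; The intrinsic declarations match the definitions introduced below.
declare i64 @llvm.x86.rdsspq(i64)
declare void @llvm.x86.incsspq(i64)

define i64 @read_and_advance_ssp() {
entry:
  ; rdsspq writes SSP into its operand register; the i64 argument supplies the
  ; initial value (left unchanged when shadow stacks are inactive), which is
  ; why the instruction definition uses the "$src = $dst" constraint.
  %ssp = call i64 @llvm.x86.rdsspq(i64 0)
  ; incsspq advances SSP in 8-byte units, i.e. past one return address here.
  call void @llvm.x86.incsspq(i64 1)
  ret i64 %ssp
}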

The patch also includes initial infrastructure support for IBT (Indirect Branch Tracking).

For more information, please see the following:
https://software.intel.com/sites/default/files/managed/4d/2a/control-flow-enforcement-technology-preview.pdf

Differential Revision: https://reviews.llvm.org/D40223

Change-Id: I4daa1f27e88176be79a4ac3b4cd26a459e88fed4
llvm-svn: 318996
Oren Ben Simhon 2017-11-26 13:02:45 +00:00
parent 801b34c954
commit 78031089be
22 changed files with 636 additions and 97 deletions


@ -63,6 +63,35 @@ let TargetPrefix = "x86" in {
Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
}
//===----------------------------------------------------------------------===//
// CET SS
let TargetPrefix = "x86" in {
def int_x86_incsspd : GCCBuiltin<"__builtin_ia32_incsspd">,
Intrinsic<[], [llvm_i32_ty], []>;
def int_x86_incsspq : GCCBuiltin<"__builtin_ia32_incsspq">,
Intrinsic<[], [llvm_i64_ty], []>;
def int_x86_rdsspd : GCCBuiltin<"__builtin_ia32_rdsspd">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
def int_x86_rdsspq : GCCBuiltin<"__builtin_ia32_rdsspq">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty], []>;
def int_x86_saveprevssp : GCCBuiltin<"__builtin_ia32_saveprevssp">,
Intrinsic<[], [], []>;
def int_x86_rstorssp : GCCBuiltin<"__builtin_ia32_rstorssp">,
Intrinsic<[], [llvm_ptr_ty], []>;
def int_x86_wrssd : GCCBuiltin<"__builtin_ia32_wrssd">,
Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], []>;
def int_x86_wrssq : GCCBuiltin<"__builtin_ia32_wrssq">,
Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], []>;
def int_x86_wrussd : GCCBuiltin<"__builtin_ia32_wrussd">,
Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], []>;
def int_x86_wrussq : GCCBuiltin<"__builtin_ia32_wrussq">,
Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], []>;
def int_x86_setssbsy : GCCBuiltin<"__builtin_ia32_setssbsy">,
Intrinsic<[], [], []>;
def int_x86_clrssbsy : GCCBuiltin<"__builtin_ia32_clrssbsy">,
Intrinsic<[], [llvm_ptr_ty], []>;
}
//===----------------------------------------------------------------------===//
// 3DNow!


@ -1217,6 +1217,7 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
Features["avx512vbmi"] = HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save;
Features["pku"] = HasLeaf7 && ((ECX >> 4) & 1);
Features["avx512vbmi2"] = HasLeaf7 && ((ECX >> 6) & 1) && HasAVX512Save;
Features["shstk"] = HasLeaf7 && ((ECX >> 7) & 1);
Features["gfni"] = HasLeaf7 && ((ECX >> 8) & 1);
Features["vaes"] = HasLeaf7 && ((ECX >> 9) & 1) && HasAVXSave;
Features["vpclmulqdq"] = HasLeaf7 && ((ECX >> 10) & 1) && HasAVXSave;
@ -1224,6 +1225,8 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
Features["avx512bitalg"] = HasLeaf7 && ((ECX >> 12) & 1) && HasAVX512Save;
Features["avx512vpopcntdq"] = HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save;
Features["ibt"] = HasLeaf7 && ((EDX >> 20) & 1);
bool HasLeafD = MaxLevel >= 0xd &&
!getX86CpuIDAndInfoEx(0xd, 0x1, &EAX, &EBX, &ECX, &EDX);


@ -213,6 +213,10 @@ def FeatureADX : SubtargetFeature<"adx", "HasADX", "true",
def FeatureSHA : SubtargetFeature<"sha", "HasSHA", "true",
"Enable SHA instructions",
[FeatureSSE2]>;
def FeatureSHSTK : SubtargetFeature<"shstk", "HasSHSTK", "true",
"Support CET Shadow-Stack instructions">;
def FeatureIBT : SubtargetFeature<"ibt", "HasIBT", "true",
"Support CET Indirect-Branch-Tracking instructions">;
def FeaturePRFCHW : SubtargetFeature<"prfchw", "HasPRFCHW", "true",
"Support PRFCHW instructions">;
def FeatureRDSEED : SubtargetFeature<"rdseed", "HasRDSEED", "true",


@ -32,7 +32,7 @@ def GetLo8XForm : SDNodeXForm<imm, [{
// PIC base construction. This expands to code that looks like this:
// call $next_inst
// popl %destreg"
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP, SSP] in
def MOVPC32r : Ii32<0xE8, Pseudo, (outs GR32:$reg), (ins i32imm:$label),
"", []>;
@ -42,7 +42,7 @@ let hasSideEffects = 0, isNotDuplicable = 1, Uses = [ESP] in
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [ESP, EFLAGS], Uses = [ESP] in {
let Defs = [ESP, EFLAGS, SSP], Uses = [ESP, SSP] in {
def ADJCALLSTACKDOWN32 : I<0, Pseudo, (outs),
(ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
"#ADJCALLSTACKDOWN",
@ -62,7 +62,7 @@ def : Pat<(X86callseq_start timm:$amt1, timm:$amt2),
// pointer before prolog-epilog rewriting occurs.
// Pessimistically assume ADJCALLSTACKDOWN / ADJCALLSTACKUP will become
// sub / add which can clobber EFLAGS.
let Defs = [RSP, EFLAGS], Uses = [RSP] in {
let Defs = [RSP, EFLAGS, SSP], Uses = [RSP, SSP] in {
def ADJCALLSTACKDOWN64 : I<0, Pseudo, (outs),
(ins i32imm:$amt1, i32imm:$amt2, i32imm:$amt3),
"#ADJCALLSTACKDOWN",
@ -458,7 +458,7 @@ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
usesCustomInserter = 1, Uses = [ESP] in {
usesCustomInserter = 1, Uses = [ESP, SSP] in {
def TLS_addr32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"# TLS_addr32",
[(X86tlsaddr tls32addr:$sym)]>,
@ -478,7 +478,7 @@ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
usesCustomInserter = 1, Uses = [RSP] in {
usesCustomInserter = 1, Uses = [RSP, SSP] in {
def TLS_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
"# TLS_addr64",
[(X86tlsaddr tls64addr:$sym)]>,
@ -494,7 +494,7 @@ def TLS_base_addr64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
// address of the variable is in %eax. %ecx is trashed during the function
// call. All other registers are preserved.
let Defs = [EAX, ECX, EFLAGS],
Uses = [ESP],
Uses = [ESP, SSP],
usesCustomInserter = 1 in
def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
"# TLSCall_32",
@ -507,7 +507,7 @@ def TLSCall_32 : I<0, Pseudo, (outs), (ins i32mem:$sym),
// On return the address of the variable is in %rax. All other
// registers are preserved.
let Defs = [RAX, EFLAGS],
Uses = [RSP],
Uses = [RSP, SSP],
usesCustomInserter = 1 in
def TLSCall_64 : I<0, Pseudo, (outs), (ins i64mem:$sym),
"# TLSCall_64",


@ -191,7 +191,7 @@ let isCall = 1 in
// a use to prevent stack-pointer assignments that appear immediately
// before calls from potentially appearing dead. Uses for argument
// registers are added manually.
let Uses = [ESP] in {
let Uses = [ESP, SSP] in {
def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
(outs), (ins i32imm_pcrel:$dst),
"call{l}\t$dst", [], IIC_CALL_RI>, OpSize32,
@ -241,7 +241,7 @@ let isCall = 1 in
// Tail call stuff.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
let Uses = [ESP] in {
let Uses = [ESP, SSP] in {
def TCRETURNdi : PseudoI<(outs),
(ins i32imm_pcrel:$dst, i32imm:$offset), []>, NotMemoryFoldable;
def TCRETURNri : PseudoI<(outs),
@ -268,7 +268,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
// rather than barriers, and they use EFLAGS.
let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
let Uses = [ESP, EFLAGS] in {
let Uses = [ESP, EFLAGS, SSP] in {
def TCRETURNdicc : PseudoI<(outs),
(ins i32imm_pcrel:$dst, i32imm:$offset, i32imm:$cond), []>;
@ -287,7 +287,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
// RSP is marked as a use to prevent stack-pointer assignments that appear
// immediately before calls from potentially appearing dead. Uses for argument
// registers are added manually.
let isCall = 1, Uses = [RSP], SchedRW = [WriteJump] in {
let isCall = 1, Uses = [RSP, SSP], SchedRW = [WriteJump] in {
// NOTE: this pattern doesn't match "X86call imm", because we do not know
// that the offset between an arbitrary immediate and the call will fit in
// the 32-bit pcrel field that we have.
@ -309,7 +309,7 @@ let isCall = 1, Uses = [RSP], SchedRW = [WriteJump] in {
}
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
isCodeGenOnly = 1, Uses = [RSP], usesCustomInserter = 1,
isCodeGenOnly = 1, Uses = [RSP, SSP], usesCustomInserter = 1,
SchedRW = [WriteJump] in {
def TCRETURNdi64 : PseudoI<(outs),
(ins i64i32imm_pcrel:$dst, i32imm:$offset),
@ -345,7 +345,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
// rather than barriers, and they use EFLAGS.
let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
let Uses = [RSP, EFLAGS] in {
let Uses = [RSP, EFLAGS, SSP] in {
def TCRETURNdi64cc : PseudoI<(outs),
(ins i64i32imm_pcrel:$dst, i32imm:$offset,
i32imm:$cond), []>;


@ -881,6 +881,8 @@ def HasCLZERO : Predicate<"Subtarget->hasCLZERO()">;
def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
def HasMPX : Predicate<"Subtarget->hasMPX()">;
def HasSHSTK : Predicate<"Subtarget->hasSHSTK()">;
def HasIBT : Predicate<"Subtarget->hasIBT()">;
def HasCLFLUSHOPT : Predicate<"Subtarget->hasCLFLUSHOPT()">;
def HasCLWB : Predicate<"Subtarget->hasCLWB()">;
def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">;


@ -480,6 +480,64 @@ def INVD : I<0x08, RawFrm, (outs), (ins), "invd", [], IIC_INVD>, TB;
def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", [], IIC_INVD>, TB;
} // SchedRW
//===----------------------------------------------------------------------===//
// CET instructions
let SchedRW = [WriteSystem], Predicates = [HasSHSTK] in{
let Uses = [SSP] in {
let Defs = [SSP] in {
def INCSSPD : I<0xAE, MRM5r, (outs), (ins GR32:$src), "incsspd\t$src",
[(int_x86_incsspd GR32:$src)]>, XS;
def INCSSPQ : RI<0xAE, MRM5r, (outs), (ins GR64:$src), "incsspq\t$src",
[(int_x86_incsspq GR64:$src)]>, XS,
Requires<[In64BitMode]>;
} // Defs SSP
let Constraints = "$src = $dst" in {
def RDSSPD : I<0x1E, MRM1r, (outs GR32:$dst), (ins GR32:$src),
"rdsspd\t$dst",
[(set GR32:$dst, (int_x86_rdsspd GR32:$src))]>, XS;
def RDSSPQ : RI<0x1E, MRM1r, (outs GR64:$dst), (ins GR64:$src),
"rdsspq\t$dst",
[(set GR64:$dst, (int_x86_rdsspq GR64:$src))]>, XS,
Requires<[In64BitMode]>;
}
let Defs = [SSP] in {
def SAVEPREVSSP : I<0x01, MRM_EA, (outs), (ins), "saveprevssp",
[(int_x86_saveprevssp)]>, XS;
def RSTORSSP : I<0x01, MRM5m, (outs), (ins i32mem:$src),
"rstorssp\t$src",
[(int_x86_rstorssp addr:$src)]>, XS;
} // Defs SSP
} // Uses SSP
def WRSSD : I<0xF6, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrssd\t{$src, $dst|$dst, $src}",
[(int_x86_wrssd GR32:$src, addr:$dst)]>, T8;
def WRSSQ : RI<0xF6, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"wrssq\t{$src, $dst|$dst, $src}",
[(int_x86_wrssq GR64:$src, addr:$dst)]>, T8,
Requires<[In64BitMode]>;
def WRUSSD : I<0xF5, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"wrussd\t{$src, $dst|$dst, $src}",
[(int_x86_wrussd GR32:$src, addr:$dst)]>, T8PD;
def WRUSSQ : RI<0xF5, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"wrussq\t{$src, $dst|$dst, $src}",
[(int_x86_wrussq GR64:$src, addr:$dst)]>, T8PD,
Requires<[In64BitMode]>;
let Defs = [SSP] in {
let Uses = [SSP] in {
def SETSSBSY : I<0x01, MRM_E8, (outs), (ins), "setssbsy",
[(int_x86_setssbsy)]>, XS;
} // Uses SSP
def CLRSSBSY : I<0xAE, MRM6m, (outs), (ins i32mem:$src),
"clrssbsy\t$src",
[(int_x86_clrssbsy addr:$src)]>, XS;
} // Defs SSP
} // SchedRW && HasSHSTK
//===----------------------------------------------------------------------===//
// XSAVE instructions
let SchedRW = [WriteSystem] in {


@ -507,6 +507,9 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
++I)
Reserved.set(*I);
// Set the Shadow Stack Pointer as reserved.
Reserved.set(X86::SSP);
// Set the instruction pointer register and its aliases as reserved.
for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
++I)


@ -308,6 +308,9 @@ def BND1 : X86Reg<"bnd1", 1>;
def BND2 : X86Reg<"bnd2", 2>;
def BND3 : X86Reg<"bnd3", 3>;
// CET registers - Shadow Stack Pointer
def SSP : X86Reg<"ssp", 0>;
//===----------------------------------------------------------------------===//
// Register Class Definitions... now that we have all of the pieces, define the
// top-level register classes. The order specified in the register list is


@ -334,6 +334,8 @@ void X86Subtarget::initializeEnvironment() {
HasMWAITX = false;
HasCLZERO = false;
HasMPX = false;
HasSHSTK = false;
HasIBT = false;
HasSGX = false;
HasCLFLUSHOPT = false;
HasCLWB = false;


@ -320,6 +320,14 @@ protected:
/// Processor supports MPX - Memory Protection Extensions
bool HasMPX;
/// Processor supports CET SHSTK - Control-Flow Enforcement Technology
/// using Shadow Stack
bool HasSHSTK;
/// Processor supports CET IBT - Control-Flow Enforcement Technology
/// using Indirect Branch Tracking
bool HasIBT;
/// Processor has Software Guard Extensions
bool HasSGX;
@ -548,6 +556,8 @@ public:
bool hasVNNI() const { return HasVNNI; }
bool hasBITALG() const { return HasBITALG; }
bool hasMPX() const { return HasMPX; }
bool hasSHSTK() const { return HasSHSTK; }
bool hasIBT() const { return HasIBT; }
bool hasCLFLUSHOPT() const { return HasCLFLUSHOPT; }
bool hasCLWB() const { return HasCLWB; }


@ -60,11 +60,11 @@ body: |
liveins: %eax
MOV32mr %stack.0.tmp, 1, _, 0, _, killed %eax
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def dead %eflags, implicit %rsp
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %ssp, implicit-def dead %eflags, implicit %rsp, implicit %ssp
%rsi = LEA64r %stack.0.tmp, 1, _, 0, _
%edi = MOV32r0 implicit-def dead %eflags
CALL64pcrel32 @doSomething, csr_64, implicit %rsp, implicit %edi, implicit %rsi, implicit-def %rsp, implicit-def %eax
ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def dead %eflags, implicit %rsp
CALL64pcrel32 @doSomething, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %rsi, implicit-def %rsp, implicit-def %ssp, implicit-def %eax
ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %ssp, implicit-def dead %eflags, implicit %rsp, implicit %ssp
bb.3.false:
liveins: %eax


@ -403,14 +403,14 @@ declare void @trivial_callee()
define void @test_trivial_call() {
; ALL-LABEL: name: test_trivial_call
; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: CALLpcrel32 @trivial_callee, csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: RET 0
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: CALL64pcrel32 @trivial_callee, csr_64, implicit %rsp
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: RET 0
call void @trivial_callee()
@ -432,7 +432,7 @@ define void @test_simple_arg(i32 %in0, i32 %in1) {
; X32-NEXT: %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
; X32-NEXT: %3:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32-NEXT: %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32-NEXT: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %4:_(p0) = COPY %esp
; X32-NEXT: %5:_(s32) = G_CONSTANT i32 0
; X32-NEXT: %6:_(p0) = G_GEP %4, %5(s32)
@ -442,16 +442,16 @@ define void @test_simple_arg(i32 %in0, i32 %in1) {
; X32-NEXT: %9:_(p0) = G_GEP %7, %8(s32)
; X32-NEXT: G_STORE %0(s32), %9(p0) :: (store 4 into stack + 4, align 0)
; X32-NEXT: CALLpcrel32 @simple_arg_callee, csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: RET 0
; X64: %0:_(s32) = COPY %edi
; X64-NEXT: %1:_(s32) = COPY %esi
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %edi = COPY %1(s32)
; X64-NEXT: %esi = COPY %0(s32)
; X64-NEXT: CALL64pcrel32 @simple_arg_callee, csr_64, implicit %rsp, implicit %edi, implicit %esi
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: CALL64pcrel32 @simple_arg_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: RET 0
call void @simple_arg_callee(i32 %in1, i32 %in0)
@ -469,7 +469,7 @@ define void @test_simple_arg8_call(i32 %in0) {
; X32-NEXT: bb.1 (%ir-block.0):
; X32-NEXT: %1:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32-NEXT: %0:_(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32-NEXT: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %2:_(p0) = COPY %esp
; X32-NEXT: %3:_(s32) = G_CONSTANT i32 0
; X32-NEXT: %4:_(p0) = G_GEP %2, %3(s32)
@ -503,11 +503,11 @@ define void @test_simple_arg8_call(i32 %in0) {
; X32-NEXT: %25:_(p0) = G_GEP %23, %24(s32)
; X32-NEXT: G_STORE %0(s32), %25(p0) :: (store 4 into stack + 28, align 0)
; X32-NEXT: CALLpcrel32 @simple_arg8_callee, csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 32, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 32, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: RET 0
; X64: %0:_(s32) = COPY %edi
; X64-NEXT: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %edi = COPY %0(s32)
; X64-NEXT: %esi = COPY %0(s32)
; X64-NEXT: %edx = COPY %0(s32)
@ -522,8 +522,8 @@ define void @test_simple_arg8_call(i32 %in0) {
; X64-NEXT: %5:_(s64) = G_CONSTANT i64 8
; X64-NEXT: %6:_(p0) = G_GEP %4, %5(s64)
; X64-NEXT: G_STORE %0(s32), %6(p0) :: (store 4 into stack + 8, align 0)
; X64-NEXT: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit %rsp, implicit %edi, implicit %esi, implicit %edx, implicit %ecx, implicit %r8d, implicit %r9d
; X64-NEXT: ADJCALLSTACKUP64 16, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi, implicit %edx, implicit %ecx, implicit %r8d, implicit %r9d
; X64-NEXT: ADJCALLSTACKUP64 16, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: RET 0
call void @simple_arg8_callee(i32 %in0, i32 %in0, i32 %in0, i32 %in0,i32 %in0, i32 %in0, i32 %in0, i32 %in0)
@ -535,24 +535,24 @@ define i32 @test_simple_return_callee() {
; ALL-LABEL: name: test_simple_return_callee
; X32: %1:_(s32) = G_CONSTANT i32 5
; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %2:_(p0) = COPY %esp
; X32-NEXT: %3:_(s32) = G_CONSTANT i32 0
; X32-NEXT: %4:_(p0) = G_GEP %2, %3(s32)
; X32-NEXT: G_STORE %1(s32), %4(p0) :: (store 4 into stack, align 0)
; X32-NEXT: CALLpcrel32 @simple_return_callee, csr_32, implicit %esp, implicit-def %eax
; X32-NEXT: CALLpcrel32 @simple_return_callee, csr_32, implicit %esp, implicit %ssp, implicit-def %eax
; X32-NEXT: %0:_(s32) = COPY %eax
; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %5:_(s32) = G_ADD %0, %0
; X32-NEXT: %eax = COPY %5(s32)
; X32-NEXT: RET 0, implicit %eax
; X64: %1:_(s32) = G_CONSTANT i32 5
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %edi = COPY %1(s32)
; X64-NEXT: CALL64pcrel32 @simple_return_callee, csr_64, implicit %rsp, implicit %edi, implicit-def %eax
; X64-NEXT: CALL64pcrel32 @simple_return_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit-def %eax
; X64-NEXT: %0:_(s32) = COPY %eax
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %2:_(s32) = G_ADD %0, %0
; X64-NEXT: %eax = COPY %2(s32)
; X64-NEXT: RET 0, implicit %eax
@ -576,15 +576,15 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-NEXT: %5:_(<4 x s32>) = G_LOAD %6(p0) :: (invariant load 16 from %fixed-stack.0, align 0)
; X32-NEXT: %0:_(<8 x s32>) = G_MERGE_VALUES %2(<4 x s32>), %3(<4 x s32>)
; X32-NEXT: %1:_(<8 x s32>) = G_MERGE_VALUES %4(<4 x s32>), %5(<4 x s32>)
; X32-NEXT: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %8:_(<4 x s32>), %9:_(<4 x s32>) = G_UNMERGE_VALUES %1(<8 x s32>)
; X32-NEXT: %xmm0 = COPY %8(<4 x s32>)
; X32-NEXT: %xmm1 = COPY %9(<4 x s32>)
; X32-NEXT: CALLpcrel32 @split_return_callee, csr_32, implicit %esp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
; X32-NEXT: CALLpcrel32 @split_return_callee, csr_32, implicit %esp, implicit %ssp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
; X32-NEXT: %10:_(<4 x s32>) = COPY %xmm0
; X32-NEXT: %11:_(<4 x s32>) = COPY %xmm1
; X32-NEXT: %7:_(<8 x s32>) = G_MERGE_VALUES %10(<4 x s32>), %11(<4 x s32>)
; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %12:_(<8 x s32>) = G_ADD %0, %7
; X32-NEXT: %13:_(<4 x s32>), %14:_(<4 x s32>) = G_UNMERGE_VALUES %12(<8 x s32>)
; X32-NEXT: %xmm0 = COPY %13(<4 x s32>)
@ -597,15 +597,15 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
; X64-NEXT: %5:_(<4 x s32>) = COPY %xmm3
; X64-NEXT: %0:_(<8 x s32>) = G_MERGE_VALUES %2(<4 x s32>), %3(<4 x s32>)
; X64-NEXT: %1:_(<8 x s32>) = G_MERGE_VALUES %4(<4 x s32>), %5(<4 x s32>)
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %7:_(<4 x s32>), %8:_(<4 x s32>) = G_UNMERGE_VALUES %1(<8 x s32>)
; X64-NEXT: %xmm0 = COPY %7(<4 x s32>)
; X64-NEXT: %xmm1 = COPY %8(<4 x s32>)
; X64-NEXT: CALL64pcrel32 @split_return_callee, csr_64, implicit %rsp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
; X64-NEXT: CALL64pcrel32 @split_return_callee, csr_64, implicit %rsp, implicit %ssp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
; X64-NEXT: %9:_(<4 x s32>) = COPY %xmm0
; X64-NEXT: %10:_(<4 x s32>) = COPY %xmm1
; X64-NEXT: %6:_(<8 x s32>) = G_MERGE_VALUES %9(<4 x s32>), %10(<4 x s32>)
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %11:_(<8 x s32>) = G_ADD %0, %6
; X64-NEXT: %12:_(<4 x s32>), %13:_(<4 x s32>) = G_UNMERGE_VALUES %11(<8 x s32>)
; X64-NEXT: %xmm0 = COPY %12(<4 x s32>)
@ -625,17 +625,17 @@ define void @test_indirect_call(void()* %func) {
; X32-NEXT: - { id: 1, class: _, preferred-register: '' }
; X32: %1:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32-NEXT: %0:gr32(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32-NEXT: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: CALL32r %0(p0), csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: RET 0
; X64: registers:
; X64-NEXT: - { id: 0, class: gr64, preferred-register: '' }
; X64: %0:gr64(p0) = COPY %rdi
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: CALL64r %0(p0), csr_64, implicit %rsp
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: RET 0
call void %func()
@ -653,49 +653,49 @@ define void @test_abi_exts_call(i8* %addr) {
; X32: %1:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32-NEXT: %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32-NEXT: %2:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr)
; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %3:_(p0) = COPY %esp
; X32-NEXT: %4:_(s32) = G_CONSTANT i32 0
; X32-NEXT: %5:_(p0) = G_GEP %3, %4(s32)
; X32-NEXT: %6:_(s32) = G_ANYEXT %2(s8)
; X32-NEXT: G_STORE %6(s32), %5(p0) :: (store 4 into stack, align 0)
; X32-NEXT: CALLpcrel32 @take_char, csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %7:_(p0) = COPY %esp
; X32-NEXT: %8:_(s32) = G_CONSTANT i32 0
; X32-NEXT: %9:_(p0) = G_GEP %7, %8(s32)
; X32-NEXT: %10:_(s32) = G_SEXT %2(s8)
; X32-NEXT: G_STORE %10(s32), %9(p0) :: (store 4 into stack, align 0)
; X32-NEXT: CALLpcrel32 @take_char, csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %11:_(p0) = COPY %esp
; X32-NEXT: %12:_(s32) = G_CONSTANT i32 0
; X32-NEXT: %13:_(p0) = G_GEP %11, %12(s32)
; X32-NEXT: %14:_(s32) = G_ZEXT %2(s8)
; X32-NEXT: G_STORE %14(s32), %13(p0) :: (store 4 into stack, align 0)
; X32-NEXT: CALLpcrel32 @take_char, csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: RET 0
; X64: %0:_(p0) = COPY %rdi
; X64-NEXT: %1:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr)
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %2:_(s32) = G_ANYEXT %1(s8)
; X64-NEXT: %edi = COPY %2(s32)
; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %edi
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %3:_(s32) = G_SEXT %1(s8)
; X64-NEXT: %edi = COPY %3(s32)
; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %edi
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %4:_(s32) = G_ZEXT %1(s8)
; X64-NEXT: %edi = COPY %4(s32)
; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %edi
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: RET 0
%val = load i8, i8* %addr
@ -720,7 +720,7 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
; X32-NEXT: %1:_(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32-NEXT: %4:_(p0) = G_LOAD %0(p0) :: (load 4 from %ir.addr_ptr)
; X32-NEXT: %5:_(s32) = G_LOAD %1(p0) :: (load 4 from %ir.val_ptr)
; X32-NEXT: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %6:_(p0) = COPY %esp
; X32-NEXT: %7:_(s32) = G_CONSTANT i32 0
; X32-NEXT: %8:_(p0) = G_GEP %6, %7(s32)
@ -730,19 +730,19 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
; X32-NEXT: %11:_(p0) = G_GEP %9, %10(s32)
; X32-NEXT: G_STORE %5(s32), %11(p0) :: (store 4 into stack + 4, align 0)
; X32-NEXT: CALLpcrel32 @variadic_callee, csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: RET 0
; X64: %0:_(p0) = COPY %rdi
; X64-NEXT: %1:_(p0) = COPY %rsi
; X64-NEXT: %2:_(p0) = G_LOAD %0(p0) :: (load 8 from %ir.addr_ptr)
; X64-NEXT: %3:_(s32) = G_LOAD %1(p0) :: (load 4 from %ir.val_ptr)
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %rdi = COPY %2(p0)
; X64-NEXT: %esi = COPY %3(s32)
; X64-NEXT: %al = MOV8ri 0
; X64-NEXT: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %rdi, implicit %esi, implicit %al
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %esi, implicit %al
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: RET 0
%addr = load i8*, i8** %addr_ptr
@ -765,7 +765,7 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
; X32-NEXT: %1:_(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32-NEXT: %4:_(p0) = G_LOAD %0(p0) :: (load 4 from %ir.addr_ptr)
; X32-NEXT: %5:_(s64) = G_LOAD %1(p0) :: (load 8 from %ir.val_ptr, align 4)
; X32-NEXT: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: %6:_(p0) = COPY %esp
; X32-NEXT: %7:_(s32) = G_CONSTANT i32 0
; X32-NEXT: %8:_(p0) = G_GEP %6, %7(s32)
@ -775,18 +775,18 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
; X32-NEXT: %11:_(p0) = G_GEP %9, %10(s32)
; X32-NEXT: G_STORE %5(s64), %11(p0) :: (store 8 into stack + 4, align 0)
; X32-NEXT: CALLpcrel32 @variadic_callee, csr_32, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 12, 0, implicit-def %esp, implicit-def %eflags, implicit %esp
; X32-NEXT: ADJCALLSTACKUP32 12, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
; X32-NEXT: RET 0
; X64: %1:_(p0) = COPY %rsi
; X64-NEXT: %2:_(p0) = G_LOAD %0(p0) :: (load 8 from %ir.addr_ptr)
; X64-NEXT: %3:_(s64) = G_LOAD %1(p0) :: (load 8 from %ir.val_ptr)
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: %rdi = COPY %2(p0)
; X64-NEXT: %xmm0 = COPY %3(s64)
; X64-NEXT: %al = MOV8ri 1
; X64-NEXT: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %rdi, implicit %xmm0, implicit %al
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit %rsp
; X64-NEXT: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %xmm0, implicit %al
; X64-NEXT: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
; X64-NEXT: RET 0
%addr = load i8*, i8** %addr_ptr


@ -3,7 +3,7 @@
target triple = "x86_64-unknown-unknown"
declare void @bar1()
define preserve_allcc void @foo()#0 {
; CHECK: foo Clobbered Registers: CS DS EFLAGS EIP EIZ ES FPSW FS GS IP RIP RIZ SS BND0 BND1 BND2 BND3 CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7 CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15 FP0 FP1 FP2 FP3 FP4 FP5 FP6 FP7 K0 K1 K2 K3 K4 K5 K6 K7 MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 R11 ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7 XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 R11B R11D R11W
; CHECK: foo Clobbered Registers: CS DS EFLAGS EIP EIZ ES FPSW FS GS IP RIP RIZ SS SSP BND0 BND1 BND2 BND3 CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7 CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15 FP0 FP1 FP2 FP3 FP4 FP5 FP6 FP7 K0 K1 K2 K3 K4 K5 K6 K7 MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 R11 ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7 XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 R11B R11D R11W
call void @bar1()
call void @bar2()
ret void


@ -33,14 +33,14 @@
...
---
# CHECK-LABEL: test9
# CHECK: ADJCALLSTACKDOWN32 16, 0, 16, implicit-def dead %esp, implicit-def dead %eflags, implicit %esp
# CHECK: ADJCALLSTACKDOWN32 16, 0, 16, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
# CHECK-NEXT: PUSH32i8 4, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32i8 3, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32i8 2, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32i8 1, implicit-def %esp, implicit %esp
# CHECK-NEXT: CALLpcrel32 @good, csr_32, implicit %esp, implicit-def %esp
# CHECK-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit %esp
# CHECK-NEXT: ADJCALLSTACKDOWN32 20, 0, 20, implicit-def dead %esp, implicit-def dead %eflags, implicit %esp
# CHECK-NEXT: CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
# CHECK-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
# CHECK-NEXT: ADJCALLSTACKDOWN32 20, 0, 20, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, _, 0, _ :: (load 4 from %stack.2.s, align 8)
# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, _, 4, _ :: (load 4 from %stack.2.s + 4)
# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, _, 0, _
@ -50,8 +50,8 @@
# CHECK-NEXT: PUSH32i8 6, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32r %2, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32r %1, implicit-def %esp, implicit %esp
# CHECK-NEXT: CALLpcrel32 @struct, csr_32, implicit %esp, implicit-def %esp
# CHECK-NEXT: ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit %esp
# CHECK-NEXT: CALLpcrel32 @struct, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
# CHECK-NEXT: ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
# CHECK-NEXT: RET 0
name: test9
alignment: 0
@ -99,15 +99,15 @@ stack:
constants:
body: |
bb.0.entry:
ADJCALLSTACKDOWN32 16, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit %esp
ADJCALLSTACKDOWN32 16, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
%0 = COPY %esp
MOV32mi %0, 1, _, 12, _, 4 :: (store 4 into stack + 12)
MOV32mi %0, 1, _, 8, _, 3 :: (store 4 into stack + 8)
MOV32mi %0, 1, _, 4, _, 2 :: (store 4 into stack + 4)
MOV32mi %0, 1, _, 0, _, 1 :: (store 4 into stack)
CALLpcrel32 @good, csr_32, implicit %esp, implicit-def %esp
ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit %esp
ADJCALLSTACKDOWN32 20, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit %esp
CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
ADJCALLSTACKDOWN32 20, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
%1 = MOV32rm %stack.2.s, 1, _, 0, _ :: (load 4 from %stack.2.s, align 8)
%2 = MOV32rm %stack.2.s, 1, _, 4, _ :: (load 4 from %stack.2.s + 4)
%3 = COPY %esp
@ -118,8 +118,8 @@ body: |
%5 = LEA32r %stack.1.q, 1, _, 0, _
MOV32mr %3, 1, _, 12, _, killed %5 :: (store 4 into stack + 12)
MOV32mi %3, 1, _, 8, _, 6 :: (store 4 into stack + 8)
CALLpcrel32 @struct, csr_32, implicit %esp, implicit-def %esp
ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit %esp
CALLpcrel32 @struct, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp,
ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
RET 0
...


@ -48,7 +48,7 @@ body: |
; CHECK-NEXT: %rdi = COPY %rsi
; CHECK-NEXT: %rsi = COPY %rax
; CHECK-NEXT: CMP64ri8 %rax, 9, implicit-def %eflags
; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit %rsp, implicit %eflags, implicit %rsp, implicit %rdi, implicit %rsi, implicit %rax, implicit-def %rax, implicit %sil, implicit-def %sil, implicit %si, implicit-def %si, implicit %esi, implicit-def %esi, implicit %rsi, implicit-def %rsi, implicit %dil, implicit-def %dil, implicit %di, implicit-def %di, implicit %edi, implicit-def %edi, implicit %rdi, implicit-def %rdi, implicit %ah, implicit-def %ah, implicit %al, implicit-def %al, implicit %ax, implicit-def %ax, implicit %eax, implicit-def %eax
; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit %rsp, implicit %eflags, implicit %ssp, implicit %rsp, implicit %rdi, implicit %rsi, implicit %rax, implicit-def %rax, implicit %sil, implicit-def %sil, implicit %si, implicit-def %si, implicit %esi, implicit-def %esi, implicit %rsi, implicit-def %rsi, implicit %dil, implicit-def %dil, implicit %di, implicit-def %di, implicit %edi, implicit-def %edi, implicit %rdi, implicit-def %rdi, implicit %ah, implicit-def %ah, implicit %al, implicit-def %al, implicit %ax, implicit-def %ax, implicit %eax, implicit-def %eax
bb.1:
successors: %bb.2, %bb.3


@ -0,0 +1,106 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+shstk -mattr=+ibt | FileCheck %s
define void @test_incsspd(i32 %a) local_unnamed_addr {
; CHECK-LABEL: test_incsspd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: incsspd %eax
; CHECK-NEXT: retl
entry:
tail call void @llvm.x86.incsspd(i32 %a)
ret void
}
declare void @llvm.x86.incsspd(i32)
define i32 @test_rdsspd(i32 %a) {
; CHECK-LABEL: test_rdsspd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: rdsspd %eax
; CHECK-NEXT: retl
entry:
%0 = call i32 @llvm.x86.rdsspd(i32 %a)
ret i32 %0
}
declare i32 @llvm.x86.rdsspd(i32)
define void @test_saveprevssp() {
; CHECK-LABEL: test_saveprevssp:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: saveprevssp
; CHECK-NEXT: retl
entry:
tail call void @llvm.x86.saveprevssp()
ret void
}
declare void @llvm.x86.saveprevssp()
define void @test_rstorssp(i8* %__p) {
; CHECK-LABEL: test_rstorssp:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: rstorssp (%eax)
; CHECK-NEXT: retl
entry:
tail call void @llvm.x86.rstorssp(i8* %__p)
ret void
}
declare void @llvm.x86.rstorssp(i8*)
define void @test_wrssd(i32 %a, i8* %__p) {
; CHECK-LABEL: test_wrssd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: wrssd %eax, (%ecx)
; CHECK-NEXT: retl
entry:
tail call void @llvm.x86.wrssd(i32 %a, i8* %__p)
ret void
}
declare void @llvm.x86.wrssd(i32, i8*)
define void @test_wrussd(i32 %a, i8* %__p) {
; CHECK-LABEL: test_wrussd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: wrussd %eax, (%ecx)
; CHECK-NEXT: retl
entry:
tail call void @llvm.x86.wrussd(i32 %a, i8* %__p)
ret void
}
declare void @llvm.x86.wrussd(i32, i8*)
define void @test_setssbsy() {
; CHECK-LABEL: test_setssbsy:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: setssbsy
; CHECK-NEXT: retl
entry:
tail call void @llvm.x86.setssbsy()
ret void
}
declare void @llvm.x86.setssbsy()
define void @test_clrssbsy(i8* %__p) {
; CHECK-LABEL: test_clrssbsy:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: clrssbsy (%eax)
; CHECK-NEXT: retl
entry:
tail call void @llvm.x86.clrssbsy(i8* %__p)
ret void
}
declare void @llvm.x86.clrssbsy(i8* %__p)


@ -0,0 +1,150 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+shstk -mattr=+ibt | FileCheck %s
define void @test_incsspd(i32 %a) local_unnamed_addr {
; CHECK-LABEL: test_incsspd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: incsspd %edi
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.incsspd(i32 %a)
ret void
}
declare void @llvm.x86.incsspd(i32)
define void @test_incsspq(i32 %a) local_unnamed_addr {
; CHECK-LABEL: test_incsspq:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movslq %edi, %rax
; CHECK-NEXT: incsspq %rax
; CHECK-NEXT: retq
entry:
%conv.i = sext i32 %a to i64
tail call void @llvm.x86.incsspq(i64 %conv.i)
ret void
}
declare void @llvm.x86.incsspq(i64)
define i32 @test_rdsspd(i32 %a) {
; CHECK-LABEL: test_rdsspd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: rdsspd %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
entry:
%0 = call i32 @llvm.x86.rdsspd(i32 %a)
ret i32 %0
}
declare i32 @llvm.x86.rdsspd(i32)
define i64 @test_rdsspq(i64 %a) {
; CHECK-LABEL: test_rdsspq:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: rdsspq %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
entry:
%0 = call i64 @llvm.x86.rdsspq(i64 %a)
ret i64 %0
}
declare i64 @llvm.x86.rdsspq(i64)
define void @test_saveprevssp() {
; CHECK-LABEL: test_saveprevssp:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: saveprevssp
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.saveprevssp()
ret void
}
declare void @llvm.x86.saveprevssp()
define void @test_rstorssp(i8* %__p) {
; CHECK-LABEL: test_rstorssp:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: rstorssp (%rdi)
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.rstorssp(i8* %__p)
ret void
}
declare void @llvm.x86.rstorssp(i8*)
define void @test_wrssd(i32 %a, i8* %__p) {
; CHECK-LABEL: test_wrssd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: wrssd %edi, (%rsi)
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.wrssd(i32 %a, i8* %__p)
ret void
}
declare void @llvm.x86.wrssd(i32, i8*)
define void @test_wrssq(i64 %a, i8* %__p) {
; CHECK-LABEL: test_wrssq:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: wrssq %rdi, (%rsi)
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.wrssq(i64 %a, i8* %__p)
ret void
}
declare void @llvm.x86.wrssq(i64, i8*)
define void @test_wrussd(i32 %a, i8* %__p) {
; CHECK-LABEL: test_wrussd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: wrussd %edi, (%rsi)
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.wrussd(i32 %a, i8* %__p)
ret void
}
declare void @llvm.x86.wrussd(i32, i8*)
define void @test_wrussq(i64 %a, i8* %__p) {
; CHECK-LABEL: test_wrussq:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: wrussq %rdi, (%rsi)
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.wrussq(i64 %a, i8* %__p)
ret void
}
declare void @llvm.x86.wrussq(i64, i8*)
define void @test_setssbsy() {
; CHECK-LABEL: test_setssbsy:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: setssbsy
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.setssbsy()
ret void
}
declare void @llvm.x86.setssbsy()
define void @test_clrssbsy(i8* %__p) {
; CHECK-LABEL: test_clrssbsy:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: clrssbsy (%rdi)
; CHECK-NEXT: retq
entry:
tail call void @llvm.x86.clrssbsy(i8* %__p)
ret void
}
declare void @llvm.x86.clrssbsy(i8* %__p)


@ -136,11 +136,11 @@ body: |
DBG_VALUE debug-use %2, debug-use _, !23, !DIExpression(), debug-location !32
%3 = MOV32rm %rip, 1, _, @bar + 4, _, debug-location !33 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 1)`, !tbaa !28)
DBG_VALUE debug-use %3, debug-use _, !24, !DIExpression(), debug-location !34
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit %rsp, debug-location !35
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !35
%edi = COPY %2, debug-location !35
%esi = COPY %3, debug-location !35
CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit killed %edi, implicit killed %esi, implicit-def %rsp, debug-location !35
ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit %rsp, debug-location !35
CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit killed %edi, implicit killed %esi, implicit-def %rsp, debug-location !35
ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !35
%eax = MOV32r0 implicit-def dead %eflags, debug-location !36
RET 0, killed %eax, debug-location !36


@ -134,11 +134,11 @@ body: |
DBG_VALUE debug-use %2, debug-use _, !23, !DIExpression(), debug-location !32
%3 = MOV32rm %rip, 1, _, @bar + 4, _, debug-location !33 :: (dereferenceable load 4 from `i32* getelementptr inbounds ([2 x i32], [2 x i32]* @bar, i64 0, i64 1)`, !tbaa !28)
DBG_VALUE debug-use %3, debug-use _, !24, !DIExpression(), debug-location !34
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit %rsp, debug-location !35
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !35
%edi = COPY %2, debug-location !35
%esi = COPY %3, debug-location !35
CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit killed %edi, implicit killed %esi, implicit-def %rsp, debug-location !35
ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit %rsp, debug-location !35
CALL64pcrel32 @foo, csr_64, implicit %rsp, implicit %ssp, implicit killed %edi, implicit killed %esi, implicit-def %rsp, debug-location !35
ADJCALLSTACKUP64 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !35
%eax = MOV32r0 implicit-def dead %eflags, debug-location !36
RET 0, killed %eax, debug-location !36


@ -129,19 +129,19 @@ body: |
%0 = COPY %ecx
MOV32mr %stack.0.x.addr, 1, _, 0, _, %0 :: (store 4 into %ir.x.addr)
DBG_VALUE %stack.0.x.addr, 0, !13, !DIExpression(), debug-location !14
ADJCALLSTACKDOWN64 32, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit %rsp, debug-location !15
ADJCALLSTACKDOWN64 32, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !15
%1 = LEA64r %stack.0.x.addr, 1, _, 0, _
%rcx = COPY %1, debug-location !15
CALL64pcrel32 @escape, csr_win64, implicit %rsp, implicit %rcx, implicit-def %rsp, debug-location !15
ADJCALLSTACKUP64 32, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit %rsp, debug-location !15
CALL64pcrel32 @escape, csr_win64, implicit %rsp, implicit %ssp, implicit %rcx, implicit-def %rsp, implicit-def %ssp, debug-location !15
ADJCALLSTACKUP64 32, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !15
DBG_VALUE 1, debug-use _, !13, !DIExpression(), debug-location !16
MOV32mi %rip, 1, _, @global, _, 1, debug-location !17 :: (store 4 into @global)
DBG_VALUE %stack.0.x.addr, 0, !13, !DIExpression(), debug-location !18
MOV32mi %stack.0.x.addr, 1, _, 0, _, 2, debug-location !18 :: (store 4 into %ir.x.addr)
ADJCALLSTACKDOWN64 32, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit %rsp, debug-location !19
ADJCALLSTACKDOWN64 32, 0, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !19
%rcx = COPY %1, debug-location !19
CALL64pcrel32 @escape, csr_win64, implicit %rsp, implicit %rcx, implicit-def %rsp, debug-location !19
ADJCALLSTACKUP64 32, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit %rsp, debug-location !19
CALL64pcrel32 @escape, csr_win64, implicit %rsp, implicit %ssp, implicit %rcx, implicit-def %rsp, implicit-def %ssp, debug-location !19
ADJCALLSTACKUP64 32, 0, implicit-def dead %rsp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %rsp, implicit %ssp, debug-location !19
RET 0, debug-location !20
...

test/MC/X86/cet-encoding.s (new file, 169 lines)

@ -0,0 +1,169 @@
// RUN: llvm-mc -triple x86_64-unknown-unknown -mattr=+shstk --show-encoding %s | FileCheck %s
// CHECK: incsspd %r13d
// CHECK: # encoding: [0xf3,0x41,0x0f,0xae,0xed]
incsspd %r13d
// CHECK: incsspq %r15
// CHECK: # encoding: [0xf3,0x49,0x0f,0xae,0xef]
incsspq %r15
// CHECK: rdsspq %r15
// CHECK: # encoding: [0xf3,0x49,0x0f,0x1e,0xcf]
rdsspq %r15
// CHECK: rdsspd %r13d
// CHECK: # encoding: [0xf3,0x41,0x0f,0x1e,0xcd]
rdsspd %r13d
// CHECK: saveprevssp
// CHECK: # encoding: [0xf3,0x0f,0x01,0xea]
saveprevssp
// CHECK: rstorssp 485498096
// CHECK: # encoding: [0xf3,0x0f,0x01,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
rstorssp 485498096
// CHECK: rstorssp (%rdx)
// CHECK: # encoding: [0xf3,0x0f,0x01,0x2a]
rstorssp (%rdx)
// CHECK: rstorssp 64(%rdx)
// CHECK: # encoding: [0xf3,0x0f,0x01,0x6a,0x40]
rstorssp 64(%rdx)
// CHECK: rstorssp 64(%rdx,%rax)
// CHECK: # encoding: [0xf3,0x0f,0x01,0x6c,0x02,0x40]
rstorssp 64(%rdx,%rax)
// CHECK: rstorssp 64(%rdx,%rax,4)
// CHECK: # encoding: [0xf3,0x0f,0x01,0x6c,0x82,0x40]
rstorssp 64(%rdx,%rax,4)
// CHECK: rstorssp -64(%rdx,%rax,4)
// CHECK: # encoding: [0xf3,0x0f,0x01,0x6c,0x82,0xc0]
rstorssp -64(%rdx,%rax,4)
// CHECK: wrssq %r15, 485498096
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf6,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
wrssq %r15, 485498096
// CHECK: wrssq %r15, (%rdx)
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf6,0x3a]
wrssq %r15, (%rdx)
// CHECK: wrssq %r15, 64(%rdx)
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf6,0x7a,0x40]
wrssq %r15, 64(%rdx)
// CHECK: wrssq %r15, 64(%rdx,%rax)
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf6,0x7c,0x02,0x40]
wrssq %r15, 64(%rdx,%rax)
// CHECK: wrssq %r15, 64(%rdx,%rax,4)
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf6,0x7c,0x82,0x40]
wrssq %r15, 64(%rdx,%rax,4)
// CHECK: wrssq %r15, -64(%rdx,%rax,4)
// CHECK: # encoding: [0x4c,0x0f,0x38,0xf6,0x7c,0x82,0xc0]
wrssq %r15, -64(%rdx,%rax,4)
// CHECK: wrssd %r13d, 485498096
// CHECK: # encoding: [0x44,0x0f,0x38,0xf6,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
wrssd %r13d, 485498096
// CHECK: wrssd %r13d, (%rdx)
// CHECK: # encoding: [0x44,0x0f,0x38,0xf6,0x2a]
wrssd %r13d, (%rdx)
// CHECK: wrssd %r13d, 64(%rdx)
// CHECK: # encoding: [0x44,0x0f,0x38,0xf6,0x6a,0x40]
wrssd %r13d, 64(%rdx)
// CHECK: wrssd %r13d, 64(%rdx,%rax)
// CHECK: # encoding: [0x44,0x0f,0x38,0xf6,0x6c,0x02,0x40]
wrssd %r13d, 64(%rdx,%rax)
// CHECK: wrssd %r13d, 64(%rdx,%rax,4)
// CHECK: # encoding: [0x44,0x0f,0x38,0xf6,0x6c,0x82,0x40]
wrssd %r13d, 64(%rdx,%rax,4)
// CHECK: wrssd %r13d, -64(%rdx,%rax,4)
// CHECK: # encoding: [0x44,0x0f,0x38,0xf6,0x6c,0x82,0xc0]
wrssd %r13d, -64(%rdx,%rax,4)
// CHECK: wrussd %r13d, 485498096
// CHECK: # encoding: [0x66,0x44,0x0f,0x38,0xf5,0x2c,0x25,0xf0,0x1c,0xf0,0x1c]
wrussd %r13d, 485498096
// CHECK: wrussd %r13d, (%rdx)
// CHECK: # encoding: [0x66,0x44,0x0f,0x38,0xf5,0x2a]
wrussd %r13d, (%rdx)
// CHECK: wrussd %r13d, 64(%rdx)
// CHECK: # encoding: [0x66,0x44,0x0f,0x38,0xf5,0x6a,0x40]
wrussd %r13d, 64(%rdx)
// CHECK: wrussd %r13d, 64(%rdx,%rax)
// CHECK: # encoding: [0x66,0x44,0x0f,0x38,0xf5,0x6c,0x02,0x40]
wrussd %r13d, 64(%rdx,%rax)
// CHECK: wrussd %r13d, 64(%rdx,%rax,4)
// CHECK: # encoding: [0x66,0x44,0x0f,0x38,0xf5,0x6c,0x82,0x40]
wrussd %r13d, 64(%rdx,%rax,4)
// CHECK: wrussd %r13d, -64(%rdx,%rax,4)
// CHECK: # encoding: [0x66,0x44,0x0f,0x38,0xf5,0x6c,0x82,0xc0]
wrussd %r13d, -64(%rdx,%rax,4)
// CHECK: wrussq %r15, 485498096
// CHECK: # encoding: [0x66,0x4c,0x0f,0x38,0xf5,0x3c,0x25,0xf0,0x1c,0xf0,0x1c]
wrussq %r15, 485498096
// CHECK: wrussq %r15, (%rdx)
// CHECK: # encoding: [0x66,0x4c,0x0f,0x38,0xf5,0x3a]
wrussq %r15, (%rdx)
// CHECK: wrussq %r15, 64(%rdx)
// CHECK: # encoding: [0x66,0x4c,0x0f,0x38,0xf5,0x7a,0x40]
wrussq %r15, 64(%rdx)
// CHECK: wrussq %r15, 64(%rdx,%rax)
// CHECK: # encoding: [0x66,0x4c,0x0f,0x38,0xf5,0x7c,0x02,0x40]
wrussq %r15, 64(%rdx,%rax)
// CHECK: wrussq %r15, 64(%rdx,%rax,4)
// CHECK: # encoding: [0x66,0x4c,0x0f,0x38,0xf5,0x7c,0x82,0x40]
wrussq %r15, 64(%rdx,%rax,4)
// CHECK: wrussq %r15, -64(%rdx,%rax,4)
// CHECK: # encoding: [0x66,0x4c,0x0f,0x38,0xf5,0x7c,0x82,0xc0]
wrussq %r15, -64(%rdx,%rax,4)
// CHECK: clrssbsy 485498096
// CHECK: # encoding: [0xf3,0x0f,0xae,0x34,0x25,0xf0,0x1c,0xf0,0x1c]
clrssbsy 485498096
// CHECK: clrssbsy (%rdx)
// CHECK: # encoding: [0xf3,0x0f,0xae,0x32]
clrssbsy (%rdx)
// CHECK: clrssbsy 64(%rdx)
// CHECK: # encoding: [0xf3,0x0f,0xae,0x72,0x40]
clrssbsy 64(%rdx)
// CHECK: clrssbsy 64(%rdx,%rax)
// CHECK: # encoding: [0xf3,0x0f,0xae,0x74,0x02,0x40]
clrssbsy 64(%rdx,%rax)
// CHECK: clrssbsy 64(%rdx,%rax,4)
// CHECK: # encoding: [0xf3,0x0f,0xae,0x74,0x82,0x40]
clrssbsy 64(%rdx,%rax,4)
// CHECK: clrssbsy -64(%rdx,%rax,4)
// CHECK: # encoding: [0xf3,0x0f,0xae,0x74,0x82,0xc0]
clrssbsy -64(%rdx,%rax,4)
// CHECK: setssbsy
// CHECK: # encoding: [0xf3,0x0f,0x01,0xe8]
setssbsy