
[X86] Remove RELEASE_ and ACQUIRE_ pseudo instructions. Use isel patterns and the normal instructions instead

At one point in time, acquire implied mayLoad and mayStore, as did release, so we needed separate pseudos that also carried those properties. This no longer appears to be the case. I believe it was changed in 2012, with a comment saying that atomic memory accesses are marked volatile, which preserves the ordering.

So from what I can tell, we shouldn't need the additional pseudos, since they don't carry any flags that differ from the normal instructions. The only thing I can think of is that the peephole pass may now consider the normal instructions as load-folding candidates where it didn't before. If that's important, hopefully there's something in the memory operand we can check to prevent the folding without relying on pseudo instructions.
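To illustrate what these isel patterns match (a sketch for orientation, not code from this commit; the function and value names are hypothetical), an acquire-load / op / release-store sequence on the same address, like the one below, should now select directly to a memory-operand add such as "addl $5, (%rdi)", with no RELEASE_ADD pseudo in between:

; Hypothetical example matching the
; (atomic_store_32 addr, (add (atomic_load_32 addr), imm)) pattern.
define void @add_release(i32* %p) {
  %v = load atomic i32, i32* %p acquire, align 4
  %s = add i32 %v, 5
  store atomic i32 %s, i32* %p release, align 4
  ret void
}

The patterns only fire when the load and store use the same address (both sides of the pattern use addr:$dst), which is why a single instruction with a memory operand can replace the load/op/store sequence.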

Differential Revision: https://reviews.llvm.org/D50212

llvm-svn: 338925
Craig Topper 2018-08-03 21:40:44 +00:00
parent c72365125a
commit 8a7a16581d
6 changed files with 77 additions and 174 deletions

lib/Target/X86/X86InstrCompiler.td

@@ -887,48 +887,39 @@ defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add">, TB, LOCK;
  * extremely late to prevent them from being accidentally reordered in the backend
  * (see below the RELEASE_MOV* / ACQUIRE_MOV* pseudo-instructions)
  */
-multiclass RELEASE_BINOP_MI<SDNode op> {
-  def NAME#8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
-                   "#BINOP "#NAME#"8mi PSEUDO!",
-                   [(atomic_store_8 addr:$dst, (op
-                       (atomic_load_8 addr:$dst), (i8 imm:$src)))]>;
-  def NAME#8mr : I<0, Pseudo, (outs), (ins i8mem:$dst, GR8:$src),
-                   "#BINOP "#NAME#"8mr PSEUDO!",
-                   [(atomic_store_8 addr:$dst, (op
-                       (atomic_load_8 addr:$dst), GR8:$src))]>;
-  def NAME#16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
-                   "#BINOP "#NAME#"16mi PSEUDO!",
-                   [(atomic_store_16 addr:$dst, (op
-                       (atomic_load_16 addr:$dst), (i16 imm:$src)))]>;
-  def NAME#16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
-                   "#BINOP "#NAME#"16mr PSEUDO!",
-                   [(atomic_store_16 addr:$dst, (op
-                       (atomic_load_16 addr:$dst), GR16:$src))]>;
-  def NAME#32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
-                   "#BINOP "#NAME#"32mi PSEUDO!",
-                   [(atomic_store_32 addr:$dst, (op
-                       (atomic_load_32 addr:$dst), (i32 imm:$src)))]>;
-  def NAME#32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
-                   "#BINOP "#NAME#"32mr PSEUDO!",
-                   [(atomic_store_32 addr:$dst, (op
-                       (atomic_load_32 addr:$dst), GR32:$src))]>;
-  def NAME#64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
-                     "#BINOP "#NAME#"64mi32 PSEUDO!",
-                     [(atomic_store_64 addr:$dst, (op
-                         (atomic_load_64 addr:$dst), (i64immSExt32:$src)))]>;
-  def NAME#64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
-                   "#BINOP "#NAME#"64mr PSEUDO!",
-                   [(atomic_store_64 addr:$dst, (op
-                       (atomic_load_64 addr:$dst), GR64:$src))]>;
-}
-let Defs = [EFLAGS], SchedRW = [WriteMicrocoded] in {
-defm RELEASE_ADD : RELEASE_BINOP_MI<add>;
-defm RELEASE_AND : RELEASE_BINOP_MI<and>;
-defm RELEASE_OR : RELEASE_BINOP_MI<or>;
-defm RELEASE_XOR : RELEASE_BINOP_MI<xor>;
-// Note: we don't deal with sub, because substractions of constants are
-// optimized into additions before this code can run.
-}
+multiclass RELEASE_BINOP_MI<string Name, SDNode op> {
+  def : Pat<(atomic_store_8 addr:$dst,
+             (op (atomic_load_8 addr:$dst), (i8 imm:$src))),
+            (!cast<Instruction>(Name#"8mi") addr:$dst, imm:$src)>;
+  def : Pat<(atomic_store_16 addr:$dst,
+             (op (atomic_load_16 addr:$dst), (i16 imm:$src))),
+            (!cast<Instruction>(Name#"16mi") addr:$dst, imm:$src)>;
+  def : Pat<(atomic_store_32 addr:$dst,
+             (op (atomic_load_32 addr:$dst), (i32 imm:$src))),
+            (!cast<Instruction>(Name#"32mi") addr:$dst, imm:$src)>;
+  def : Pat<(atomic_store_64 addr:$dst,
+             (op (atomic_load_64 addr:$dst), (i64immSExt32:$src))),
+            (!cast<Instruction>(Name#"64mi32") addr:$dst, (i64immSExt32:$src))>;
+  def : Pat<(atomic_store_8 addr:$dst,
+             (op (atomic_load_8 addr:$dst), (i8 GR8:$src))),
+            (!cast<Instruction>(Name#"8mr") addr:$dst, GR8:$src)>;
+  def : Pat<(atomic_store_16 addr:$dst,
+             (op (atomic_load_16 addr:$dst), (i16 GR16:$src))),
+            (!cast<Instruction>(Name#"16mr") addr:$dst, GR16:$src)>;
+  def : Pat<(atomic_store_32 addr:$dst,
+             (op (atomic_load_32 addr:$dst), (i32 GR32:$src))),
+            (!cast<Instruction>(Name#"32mr") addr:$dst, GR32:$src)>;
+  def : Pat<(atomic_store_64 addr:$dst,
+             (op (atomic_load_64 addr:$dst), (i64 GR64:$src))),
+            (!cast<Instruction>(Name#"64mr") addr:$dst, GR64:$src)>;
+}
+defm : RELEASE_BINOP_MI<"ADD", add>;
+defm : RELEASE_BINOP_MI<"AND", and>;
+defm : RELEASE_BINOP_MI<"OR", or>;
+defm : RELEASE_BINOP_MI<"XOR", xor>;
+// Note: we don't deal with sub, because substractions of constants are
+// optimized into additions before this code can run.
 
 // Same as above, but for floating-point.
 // FIXME: imm version.
@@ -953,88 +944,64 @@ defm RELEASE_FADD : RELEASE_FP_BINOP_MI<fadd>;
 // FIXME: Add fsub, fmul, fdiv, ...
 }
 
-multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
-  def NAME#8m : I<0, Pseudo, (outs), (ins i8mem:$dst),
-                  "#UNOP "#NAME#"8m PSEUDO!",
-                  [(atomic_store_8 addr:$dst, dag8)]>;
-  def NAME#16m : I<0, Pseudo, (outs), (ins i16mem:$dst),
-                   "#UNOP "#NAME#"16m PSEUDO!",
-                   [(atomic_store_16 addr:$dst, dag16)]>;
-  def NAME#32m : I<0, Pseudo, (outs), (ins i32mem:$dst),
-                   "#UNOP "#NAME#"32m PSEUDO!",
-                   [(atomic_store_32 addr:$dst, dag32)]>;
-  def NAME#64m : I<0, Pseudo, (outs), (ins i64mem:$dst),
-                   "#UNOP "#NAME#"64m PSEUDO!",
-                   [(atomic_store_64 addr:$dst, dag64)]>;
+multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
+                        dag dag64> {
+  def : Pat<(atomic_store_8 addr:$dst, dag8),
+            (!cast<Instruction>(Name#8m) addr:$dst)>;
+  def : Pat<(atomic_store_16 addr:$dst, dag16),
+            (!cast<Instruction>(Name#16m) addr:$dst)>;
+  def : Pat<(atomic_store_32 addr:$dst, dag32),
+            (!cast<Instruction>(Name#32m) addr:$dst)>;
+  def : Pat<(atomic_store_64 addr:$dst, dag64),
+            (!cast<Instruction>(Name#64m) addr:$dst)>;
 }
 
-let Defs = [EFLAGS], Predicates = [UseIncDec], SchedRW = [WriteMicrocoded] in {
-defm RELEASE_INC : RELEASE_UNOP<
+let Predicates = [UseIncDec] in {
+defm : RELEASE_UNOP<"INC",
     (add (atomic_load_8 addr:$dst), (i8 1)),
     (add (atomic_load_16 addr:$dst), (i16 1)),
     (add (atomic_load_32 addr:$dst), (i32 1)),
     (add (atomic_load_64 addr:$dst), (i64 1))>;
-defm RELEASE_DEC : RELEASE_UNOP<
+defm : RELEASE_UNOP<"DEC",
     (add (atomic_load_8 addr:$dst), (i8 -1)),
    (add (atomic_load_16 addr:$dst), (i16 -1)),
    (add (atomic_load_32 addr:$dst), (i32 -1)),
    (add (atomic_load_64 addr:$dst), (i64 -1))>;
 }
 
-let Defs = [EFLAGS] in {
-defm RELEASE_NEG : RELEASE_UNOP<
-    (ineg (i8 (atomic_load_8 addr:$dst))),
-    (ineg (i16 (atomic_load_16 addr:$dst))),
-    (ineg (i32 (atomic_load_32 addr:$dst))),
-    (ineg (i64 (atomic_load_64 addr:$dst)))>;
-}
-// NOT doesn't set flags.
-defm RELEASE_NOT : RELEASE_UNOP<
+defm : RELEASE_UNOP<"NEG",
+    (ineg (i8 (atomic_load_8 addr:$dst))),
+    (ineg (i16 (atomic_load_16 addr:$dst))),
+    (ineg (i32 (atomic_load_32 addr:$dst))),
+    (ineg (i64 (atomic_load_64 addr:$dst)))>;
+defm : RELEASE_UNOP<"NOT",
     (not (i8 (atomic_load_8 addr:$dst))),
     (not (i16 (atomic_load_16 addr:$dst))),
     (not (i32 (atomic_load_32 addr:$dst))),
     (not (i64 (atomic_load_64 addr:$dst)))>;
 
-let SchedRW = [WriteMicrocoded] in {
-def RELEASE_MOV8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
-                       "#RELEASE_MOV8mi PSEUDO!",
-                       [(atomic_store_8 addr:$dst, (i8 imm:$src))]>;
-def RELEASE_MOV16mi : I<0, Pseudo, (outs), (ins i16mem:$dst, i16imm:$src),
-                        "#RELEASE_MOV16mi PSEUDO!",
-                        [(atomic_store_16 addr:$dst, (i16 imm:$src))]>;
-def RELEASE_MOV32mi : I<0, Pseudo, (outs), (ins i32mem:$dst, i32imm:$src),
-                        "#RELEASE_MOV32mi PSEUDO!",
-                        [(atomic_store_32 addr:$dst, (i32 imm:$src))]>;
-def RELEASE_MOV64mi32 : I<0, Pseudo, (outs), (ins i64mem:$dst, i64i32imm:$src),
-                          "#RELEASE_MOV64mi32 PSEUDO!",
-                          [(atomic_store_64 addr:$dst, i64immSExt32:$src)]>;
+def : Pat<(atomic_store_8 addr:$dst, (i8 imm:$src)),
+          (MOV8mi addr:$dst, imm:$src)>;
+def : Pat<(atomic_store_16 addr:$dst, (i16 imm:$src)),
+          (MOV16mi addr:$dst, imm:$src)>;
+def : Pat<(atomic_store_32 addr:$dst, (i32 imm:$src)),
+          (MOV32mi addr:$dst, imm:$src)>;
+def : Pat<(atomic_store_64 addr:$dst, (i64immSExt32:$src)),
+          (MOV64mi32 addr:$dst, i64immSExt32:$src)>;
 
-def RELEASE_MOV8mr : I<0, Pseudo, (outs), (ins i8mem :$dst, GR8 :$src),
-                       "#RELEASE_MOV8mr PSEUDO!",
-                       [(atomic_store_8 addr:$dst, GR8 :$src)]>;
-def RELEASE_MOV16mr : I<0, Pseudo, (outs), (ins i16mem:$dst, GR16:$src),
-                        "#RELEASE_MOV16mr PSEUDO!",
-                        [(atomic_store_16 addr:$dst, GR16:$src)]>;
-def RELEASE_MOV32mr : I<0, Pseudo, (outs), (ins i32mem:$dst, GR32:$src),
-                        "#RELEASE_MOV32mr PSEUDO!",
-                        [(atomic_store_32 addr:$dst, GR32:$src)]>;
-def RELEASE_MOV64mr : I<0, Pseudo, (outs), (ins i64mem:$dst, GR64:$src),
-                        "#RELEASE_MOV64mr PSEUDO!",
-                        [(atomic_store_64 addr:$dst, GR64:$src)]>;
+def : Pat<(atomic_store_8 addr:$dst, GR8:$src),
+          (MOV8mr addr:$dst, GR8:$src)>;
+def : Pat<(atomic_store_16 addr:$dst, GR16:$src),
+          (MOV16mr addr:$dst, GR16:$src)>;
+def : Pat<(atomic_store_32 addr:$dst, GR32:$src),
+          (MOV32mr addr:$dst, GR32:$src)>;
+def : Pat<(atomic_store_64 addr:$dst, GR64:$src),
+          (MOV64mr addr:$dst, GR64:$src)>;
 
-def ACQUIRE_MOV8rm : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src),
-                       "#ACQUIRE_MOV8rm PSEUDO!",
-                       [(set GR8:$dst, (atomic_load_8 addr:$src))]>;
-def ACQUIRE_MOV16rm : I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$src),
-                        "#ACQUIRE_MOV16rm PSEUDO!",
-                        [(set GR16:$dst, (atomic_load_16 addr:$src))]>;
-def ACQUIRE_MOV32rm : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$src),
-                        "#ACQUIRE_MOV32rm PSEUDO!",
-                        [(set GR32:$dst, (atomic_load_32 addr:$src))]>;
-def ACQUIRE_MOV64rm : I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$src),
-                        "#ACQUIRE_MOV64rm PSEUDO!",
-                        [(set GR64:$dst, (atomic_load_64 addr:$src))]>;
-} // SchedRW
+def : Pat<(i8 (atomic_load_8 addr:$src)), (MOV8rm addr:$src)>;
+def : Pat<(i16 (atomic_load_16 addr:$src)), (MOV16rm addr:$src)>;
+def : Pat<(i32 (atomic_load_32 addr:$src)), (MOV32rm addr:$src)>;
+def : Pat<(i64 (atomic_load_64 addr:$src)), (MOV64rm addr:$src)>;
 
 //===----------------------------------------------------------------------===//
 // DAG Pattern Matching Rules

lib/Target/X86/X86MCInstLower.cpp

@@ -584,70 +584,6 @@ ReSimplify:
   case X86::ADD32ri8_DB: OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
   case X86::ADD64ri8_DB: OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;
 
-  // Atomic load and store require a separate pseudo-inst because Acquire
-  // implies mayStore and Release implies mayLoad; fix these to regular MOV
-  // instructions here
-  case X86::ACQUIRE_MOV8rm: OutMI.setOpcode(X86::MOV8rm); goto ReSimplify;
-  case X86::ACQUIRE_MOV16rm: OutMI.setOpcode(X86::MOV16rm); goto ReSimplify;
-  case X86::ACQUIRE_MOV32rm: OutMI.setOpcode(X86::MOV32rm); goto ReSimplify;
-  case X86::ACQUIRE_MOV64rm: OutMI.setOpcode(X86::MOV64rm); goto ReSimplify;
-  case X86::RELEASE_MOV8mr: OutMI.setOpcode(X86::MOV8mr); goto ReSimplify;
-  case X86::RELEASE_MOV16mr: OutMI.setOpcode(X86::MOV16mr); goto ReSimplify;
-  case X86::RELEASE_MOV32mr: OutMI.setOpcode(X86::MOV32mr); goto ReSimplify;
-  case X86::RELEASE_MOV64mr: OutMI.setOpcode(X86::MOV64mr); goto ReSimplify;
-  case X86::RELEASE_MOV8mi: OutMI.setOpcode(X86::MOV8mi); goto ReSimplify;
-  case X86::RELEASE_MOV16mi: OutMI.setOpcode(X86::MOV16mi); goto ReSimplify;
-  case X86::RELEASE_MOV32mi: OutMI.setOpcode(X86::MOV32mi); goto ReSimplify;
-  case X86::RELEASE_MOV64mi32: OutMI.setOpcode(X86::MOV64mi32); goto ReSimplify;
-  case X86::RELEASE_ADD8mi: OutMI.setOpcode(X86::ADD8mi); goto ReSimplify;
-  case X86::RELEASE_ADD8mr: OutMI.setOpcode(X86::ADD8mr); goto ReSimplify;
-  case X86::RELEASE_ADD16mi: OutMI.setOpcode(X86::ADD16mi); goto ReSimplify;
-  case X86::RELEASE_ADD16mr: OutMI.setOpcode(X86::ADD16mr); goto ReSimplify;
-  case X86::RELEASE_ADD32mi: OutMI.setOpcode(X86::ADD32mi); goto ReSimplify;
-  case X86::RELEASE_ADD32mr: OutMI.setOpcode(X86::ADD32mr); goto ReSimplify;
-  case X86::RELEASE_ADD64mi32: OutMI.setOpcode(X86::ADD64mi32); goto ReSimplify;
-  case X86::RELEASE_ADD64mr: OutMI.setOpcode(X86::ADD64mr); goto ReSimplify;
-  case X86::RELEASE_AND8mi: OutMI.setOpcode(X86::AND8mi); goto ReSimplify;
-  case X86::RELEASE_AND8mr: OutMI.setOpcode(X86::AND8mr); goto ReSimplify;
-  case X86::RELEASE_AND16mi: OutMI.setOpcode(X86::AND16mi); goto ReSimplify;
-  case X86::RELEASE_AND16mr: OutMI.setOpcode(X86::AND16mr); goto ReSimplify;
-  case X86::RELEASE_AND32mi: OutMI.setOpcode(X86::AND32mi); goto ReSimplify;
-  case X86::RELEASE_AND32mr: OutMI.setOpcode(X86::AND32mr); goto ReSimplify;
-  case X86::RELEASE_AND64mi32: OutMI.setOpcode(X86::AND64mi32); goto ReSimplify;
-  case X86::RELEASE_AND64mr: OutMI.setOpcode(X86::AND64mr); goto ReSimplify;
-  case X86::RELEASE_OR8mi: OutMI.setOpcode(X86::OR8mi); goto ReSimplify;
-  case X86::RELEASE_OR8mr: OutMI.setOpcode(X86::OR8mr); goto ReSimplify;
-  case X86::RELEASE_OR16mi: OutMI.setOpcode(X86::OR16mi); goto ReSimplify;
-  case X86::RELEASE_OR16mr: OutMI.setOpcode(X86::OR16mr); goto ReSimplify;
-  case X86::RELEASE_OR32mi: OutMI.setOpcode(X86::OR32mi); goto ReSimplify;
-  case X86::RELEASE_OR32mr: OutMI.setOpcode(X86::OR32mr); goto ReSimplify;
-  case X86::RELEASE_OR64mi32: OutMI.setOpcode(X86::OR64mi32); goto ReSimplify;
-  case X86::RELEASE_OR64mr: OutMI.setOpcode(X86::OR64mr); goto ReSimplify;
-  case X86::RELEASE_XOR8mi: OutMI.setOpcode(X86::XOR8mi); goto ReSimplify;
-  case X86::RELEASE_XOR8mr: OutMI.setOpcode(X86::XOR8mr); goto ReSimplify;
-  case X86::RELEASE_XOR16mi: OutMI.setOpcode(X86::XOR16mi); goto ReSimplify;
-  case X86::RELEASE_XOR16mr: OutMI.setOpcode(X86::XOR16mr); goto ReSimplify;
-  case X86::RELEASE_XOR32mi: OutMI.setOpcode(X86::XOR32mi); goto ReSimplify;
-  case X86::RELEASE_XOR32mr: OutMI.setOpcode(X86::XOR32mr); goto ReSimplify;
-  case X86::RELEASE_XOR64mi32: OutMI.setOpcode(X86::XOR64mi32); goto ReSimplify;
-  case X86::RELEASE_XOR64mr: OutMI.setOpcode(X86::XOR64mr); goto ReSimplify;
-  case X86::RELEASE_INC8m: OutMI.setOpcode(X86::INC8m); goto ReSimplify;
-  case X86::RELEASE_INC16m: OutMI.setOpcode(X86::INC16m); goto ReSimplify;
-  case X86::RELEASE_INC32m: OutMI.setOpcode(X86::INC32m); goto ReSimplify;
-  case X86::RELEASE_INC64m: OutMI.setOpcode(X86::INC64m); goto ReSimplify;
-  case X86::RELEASE_DEC8m: OutMI.setOpcode(X86::DEC8m); goto ReSimplify;
-  case X86::RELEASE_DEC16m: OutMI.setOpcode(X86::DEC16m); goto ReSimplify;
-  case X86::RELEASE_DEC32m: OutMI.setOpcode(X86::DEC32m); goto ReSimplify;
-  case X86::RELEASE_DEC64m: OutMI.setOpcode(X86::DEC64m); goto ReSimplify;
-  case X86::RELEASE_NOT8m: OutMI.setOpcode(X86::NOT8m); goto ReSimplify;
-  case X86::RELEASE_NOT16m: OutMI.setOpcode(X86::NOT16m); goto ReSimplify;
-  case X86::RELEASE_NOT32m: OutMI.setOpcode(X86::NOT32m); goto ReSimplify;
-  case X86::RELEASE_NOT64m: OutMI.setOpcode(X86::NOT64m); goto ReSimplify;
-  case X86::RELEASE_NEG8m: OutMI.setOpcode(X86::NEG8m); goto ReSimplify;
-  case X86::RELEASE_NEG16m: OutMI.setOpcode(X86::NEG16m); goto ReSimplify;
-  case X86::RELEASE_NEG32m: OutMI.setOpcode(X86::NEG32m); goto ReSimplify;
-  case X86::RELEASE_NEG64m: OutMI.setOpcode(X86::NEG64m); goto ReSimplify;
-
   // We don't currently select the correct instruction form for instructions
   // which have a short %eax, etc. form. Handle this by custom lowering, for
   // now.

test/CodeGen/X86/atomic-non-integer.ll

@@ -62,7 +62,7 @@ define half @load_half(half* %fptr) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    movw (%rdi), %ax
+; CHECK-NEXT:    movzwl (%rdi), %eax
 ; CHECK-NEXT:    movzwl %ax, %edi
 ; CHECK-NEXT:    callq __gnu_h2f_ieee
 ; CHECK-NEXT:    popq %rax

test/CodeGen/X86/atomic_idempotent.ll

@@ -28,14 +28,14 @@ define i16 @or16(i16* %p) {
 ; X64-LABEL: or16:
 ; X64:       # %bb.0:
 ; X64-NEXT:    mfence
-; X64-NEXT:    movw (%rdi), %ax
+; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    retq
 ;
 ; X32-LABEL: or16:
 ; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    mfence
-; X32-NEXT:    movw (%eax), %ax
+; X32-NEXT:    movzwl (%eax), %eax
 ; X32-NEXT:    retl
   %1 = atomicrmw or i16* %p, i16 0 acquire
   ret i16 %1

test/CodeGen/X86/atomic_mi.ll

@@ -1607,7 +1607,7 @@ define void @neg_16(i16* %p) {
 ; treat 16 bit arithmetic as expensive on X86/X86_64.
 ; X64-LABEL: neg_16:
 ; X64:       # %bb.0:
-; X64-NEXT:    movw (%rdi), %ax
+; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    negl %eax
 ; X64-NEXT:    movw %ax, (%rdi)
 ; X64-NEXT:    retq
@@ -1615,7 +1615,7 @@ define void @neg_16(i16* %p) {
 ; X32-LABEL: neg_16:
 ; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movw (%eax), %cx
+; X32-NEXT:    movzwl (%eax), %ecx
 ; X32-NEXT:    negl %ecx
 ; X32-NEXT:    movw %cx, (%eax)
 ; X32-NEXT:    retl

test/CodeGen/X86/tail-merge-after-mbp.mir

@@ -118,7 +118,7 @@ body: |
   bb.2.sw.bb:
     successors: %bb.3(0x00000800), %bb.6(0x7ffff800)
 
-    $al = ACQUIRE_MOV8rm $rip, 1, $noreg, @static_local_guard, $noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
+    $al = MOV8rm $rip, 1, $noreg, @static_local_guard, $noreg :: (volatile load acquire 1 from `i8* bitcast (i64* @static_local_guard to i8*)`, align 8)
     TEST8rr killed $al, $al, implicit-def $eflags
     JNE_1 %bb.6, implicit killed $eflags
     JMP_1 %bb.3