
[X86] Allow 'atomic_store (neg/not atomic_load)' to isel to a RMW instruction.

There was a FIXME in the .td file about a type inference issue that was easy to fix.

llvm-svn: 338782
Craig Topper 2018-08-02 23:30:38 +00:00
parent e101b55688
commit 27e7f59c1b
3 changed files with 27 additions and 32 deletions
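
For context, the IR shape this change targets mirrors the updated tests below (the function and value names here are illustrative): an atomic load, a plain not/neg of the loaded value, and an atomic store back to the same address.

define void @not_32_example(i32* %p) {
  %v = load atomic i32, i32* %p acquire, align 4
  %n = xor i32 %v, -1                          ; 'not' is spelled xor with -1 in IR
  store atomic i32 %n, i32* %p release, align 4
  ret void
}

With this change, isel folds the whole sequence into one memory-destination instruction (notl (%rdi) on x86-64) instead of a separate load, xor, and store.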

lib/Target/X86/X86InstrCompiler.td

@@ -974,23 +974,20 @@ let Defs = [EFLAGS], Predicates = [UseIncDec], SchedRW = [WriteMicrocoded] in {
     (add (atomic_load_32 addr:$dst), (i32 -1)),
     (add (atomic_load_64 addr:$dst), (i64 -1))>;
 }
-/*
-TODO: These don't work because the type inference of TableGen fails.
-TODO: find a way to fix it.
 let Defs = [EFLAGS] in {
 defm RELEASE_NEG : RELEASE_UNOP<
-    (ineg (atomic_load_8 addr:$dst)),
-    (ineg (atomic_load_16 addr:$dst)),
-    (ineg (atomic_load_32 addr:$dst)),
-    (ineg (atomic_load_64 addr:$dst))>;
+    (ineg (i8 (atomic_load_8 addr:$dst))),
+    (ineg (i16 (atomic_load_16 addr:$dst))),
+    (ineg (i32 (atomic_load_32 addr:$dst))),
+    (ineg (i64 (atomic_load_64 addr:$dst)))>;
 }
 // NOT doesn't set flags.
 defm RELEASE_NOT : RELEASE_UNOP<
-    (not (atomic_load_8 addr:$dst)),
-    (not (atomic_load_16 addr:$dst)),
-    (not (atomic_load_32 addr:$dst)),
-    (not (atomic_load_64 addr:$dst))>;
-*/
+    (not (i8 (atomic_load_8 addr:$dst))),
+    (not (i16 (atomic_load_16 addr:$dst))),
+    (not (i32 (atomic_load_32 addr:$dst))),
+    (not (i64 (atomic_load_64 addr:$dst)))>;
 let SchedRW = [WriteMicrocoded] in {
 def RELEASE_MOV8mi : I<0, Pseudo, (outs), (ins i8mem:$dst, i8imm:$src),
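
The defm lines above instantiate RELEASE_UNOP, which is defined earlier in X86InstrCompiler.td. A sketch of its likely shape (assumed here, not part of this diff) shows why the explicit (i8 ...) etc. casts matter: each dag operand is spliced in as the stored value of an atomic_store_N pattern, and since the atomic load/store nodes carry no value type TableGen can propagate, the cast is what pins down the result type of the ineg/not node.

// Assumed sketch of the multiclass these defm lines feed; the real
// definition lives earlier in this file and may differ in detail.
multiclass RELEASE_UNOP<dag dag8, dag dag16, dag dag32, dag dag64> {
  def NAME#8m : I<0, Pseudo, (outs), (ins i8mem:$dst),
                  "#UNOP "#NAME#"8m PSEUDO!",
                  [(atomic_store_8 addr:$dst, dag8)]>;
  // ...16-, 32-, and 64-bit variants follow the same pattern with
  // atomic_store_16/32/64 and i16mem/i32mem/i64mem operands.
}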

lib/Target/X86/X86MCInstLower.cpp

@@ -631,6 +631,14 @@ ReSimplify:
   case X86::RELEASE_DEC16m: OutMI.setOpcode(X86::DEC16m); goto ReSimplify;
   case X86::RELEASE_DEC32m: OutMI.setOpcode(X86::DEC32m); goto ReSimplify;
   case X86::RELEASE_DEC64m: OutMI.setOpcode(X86::DEC64m); goto ReSimplify;
+  case X86::RELEASE_NOT8m:  OutMI.setOpcode(X86::NOT8m);  goto ReSimplify;
+  case X86::RELEASE_NOT16m: OutMI.setOpcode(X86::NOT16m); goto ReSimplify;
+  case X86::RELEASE_NOT32m: OutMI.setOpcode(X86::NOT32m); goto ReSimplify;
+  case X86::RELEASE_NOT64m: OutMI.setOpcode(X86::NOT64m); goto ReSimplify;
+  case X86::RELEASE_NEG8m:  OutMI.setOpcode(X86::NEG8m);  goto ReSimplify;
+  case X86::RELEASE_NEG16m: OutMI.setOpcode(X86::NEG16m); goto ReSimplify;
+  case X86::RELEASE_NEG32m: OutMI.setOpcode(X86::NEG32m); goto ReSimplify;
+  case X86::RELEASE_NEG64m: OutMI.setOpcode(X86::NEG64m); goto ReSimplify;
 
   // We don't currently select the correct instruction form for instructions
   // which have a short %eax, etc. form. Handle this by custom lowering, for
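
Why a plain opcode swap is sound here: these pseudos exist for IR that does an atomic load, a plain ALU op, and an atomic release store (not an atomicrmw), and on x86 ordinary aligned loads and stores already provide acquire/release ordering. The memory-destination form performs the same two individually atomic accesses, so no LOCK prefix or fence is needed. Before/after for the 32-bit neg case, taken from the test updates below (register choice per the old codegen):

# before: acquire load, negate in a register, release store
movl (%rdi), %eax
negl %eax
movl %eax, (%rdi)

# after: one RMW instruction with the same pair of atomic accesses
negl (%rdi)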

test/CodeGen/X86/atomic_mi.ll

@@ -1508,13 +1508,13 @@ define void @dec_32_seq_cst(i32* %p) {
 define void @not_8(i8* %p) {
 ; X64-LABEL: not_8:
 ; X64: # %bb.0:
-; X64-NEXT: xorb $-1, (%rdi)
+; X64-NEXT: notb (%rdi)
 ; X64-NEXT: retq
 ;
 ; X32-LABEL: not_8:
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorb $-1, (%eax)
+; X32-NEXT: notb (%eax)
 ; X32-NEXT: retl
   %1 = load atomic i8, i8* %p seq_cst, align 1
   %2 = xor i8 %1, -1
@@ -1548,13 +1548,13 @@ define void @not_16(i16* %p) {
 define void @not_32(i32* %p) {
 ; X64-LABEL: not_32:
 ; X64: # %bb.0:
-; X64-NEXT: xorl $-1, (%rdi)
+; X64-NEXT: notl (%rdi)
 ; X64-NEXT: retq
 ;
 ; X32-LABEL: not_32:
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl $-1, (%eax)
+; X32-NEXT: notl (%eax)
 ; X32-NEXT: retl
   %1 = load atomic i32, i32* %p acquire, align 4
   %2 = xor i32 %1, -1
@@ -1565,7 +1565,7 @@ define void @not_32(i32* %p) {
 define void @not_64(i64* %p) {
 ; X64-LABEL: not_64:
 ; X64: # %bb.0:
-; X64-NEXT: xorq $-1, (%rdi)
+; X64-NEXT: notq (%rdi)
 ; X64-NEXT: retq
 ;
 ; X32-LABEL: not_64:
@@ -1632,17 +1632,13 @@ define void @not_32_seq_cst(i32* %p) {
 define void @neg_8(i8* %p) {
 ; X64-LABEL: neg_8:
 ; X64: # %bb.0:
-; X64-NEXT: movb (%rdi), %al
-; X64-NEXT: negb %al
-; X64-NEXT: movb %al, (%rdi)
+; X64-NEXT: negb (%rdi)
 ; X64-NEXT: retq
 ;
 ; X32-LABEL: neg_8:
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movb (%eax), %cl
-; X32-NEXT: negb %cl
-; X32-NEXT: movb %cl, (%eax)
+; X32-NEXT: negb (%eax)
 ; X32-NEXT: retl
   %1 = load atomic i8, i8* %p seq_cst, align 1
   %2 = sub i8 0, %1
@@ -1676,17 +1672,13 @@ define void @neg_16(i16* %p) {
 define void @neg_32(i32* %p) {
 ; X64-LABEL: neg_32:
 ; X64: # %bb.0:
-; X64-NEXT: movl (%rdi), %eax
-; X64-NEXT: negl %eax
-; X64-NEXT: movl %eax, (%rdi)
+; X64-NEXT: negl (%rdi)
 ; X64-NEXT: retq
 ;
 ; X32-LABEL: neg_32:
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %ecx
-; X32-NEXT: negl %ecx
-; X32-NEXT: movl %ecx, (%eax)
+; X32-NEXT: negl (%eax)
 ; X32-NEXT: retl
   %1 = load atomic i32, i32* %p acquire, align 4
   %2 = sub i32 0, %1
@@ -1697,9 +1689,7 @@ define void @neg_32(i32* %p) {
 define void @neg_64(i64* %p) {
 ; X64-LABEL: neg_64:
 ; X64: # %bb.0:
-; X64-NEXT: movq (%rdi), %rax
-; X64-NEXT: negq %rax
-; X64-NEXT: movq %rax, (%rdi)
+; X64-NEXT: negq (%rdi)
 ; X64-NEXT: retq
 ;
 ; X32-LABEL: neg_64: