1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-02-01 13:11:39 +01:00

[mips] Define patterns for the atomic_{load,store}_{8,16,32,64} nodes.

Summary:
Without these patterns we would generate a complete LL/SC sequence.
This would be problematic for memory regions marked as WRITE-only or
READ-only, as the LL/SC instructions would read from or write to the
protected memory regions, respectively.

Reviewers: dsanders

Subscribers: llvm-commits, dsanders

Differential Revision: http://reviews.llvm.org/D14397

llvm-svn: 252293
This commit is contained in:
Vasileios Kalintiris 2015-11-06 12:07:20 +00:00
parent 0113590b91
commit ac58e2a800
7 changed files with 136 additions and 36 deletions

View File

@ -546,6 +546,18 @@ def : MipsPat<(brcond (i32 (setne (and i64:$lhs, PowerOf2HI:$mask), 0)), bb:$dst
(BBIT132 i64:$lhs, (Log2HI PowerOf2HI:$mask), bb:$dst)>;
}
// Atomic load patterns.
// Select atomic_load_{8,16,32,64} directly to ordinary load instructions
// instead of letting them expand into a full LL/SC sequence (memory-ordering
// fences are inserted separately; see setInsertFencesForAtomic in
// MipsISelLowering).
def : MipsPat<(atomic_load_8 addr:$a), (LB64 addr:$a)>;
def : MipsPat<(atomic_load_16 addr:$a), (LH64 addr:$a)>;
def : MipsPat<(atomic_load_32 addr:$a), (LW64 addr:$a)>;
def : MipsPat<(atomic_load_64 addr:$a), (LD addr:$a)>;
// Atomic store patterns.
// Likewise, select atomic_store_{8,16,32,64} to plain store instructions
// operating on 64-bit GPRs.
def : MipsPat<(atomic_store_8 addr:$a, GPR64:$v), (SB64 GPR64:$v, addr:$a)>;
def : MipsPat<(atomic_store_16 addr:$a, GPR64:$v), (SH64 GPR64:$v, addr:$a)>;
def : MipsPat<(atomic_store_32 addr:$a, GPR64:$v), (SW64 GPR64:$v, addr:$a)>;
def : MipsPat<(atomic_store_64 addr:$a, GPR64:$v), (SD GPR64:$v, addr:$a)>;
//===----------------------------------------------------------------------===//
// Instruction aliases
//===----------------------------------------------------------------------===//

View File

@ -391,10 +391,10 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
if (!Subtarget.isGP64bit()) {
setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
}
setInsertFencesForAtomic(true);

View File

@ -2083,6 +2083,16 @@ let AddedComplexity = 40 in {
}
}
// Atomic load patterns.
// Select atomic_load_{8,16,32} directly to ordinary load instructions
// instead of letting them expand into a full LL/SC sequence (memory-ordering
// fences are inserted separately; see setInsertFencesForAtomic in
// MipsISelLowering).
def : MipsPat<(atomic_load_8 addr:$a), (LB addr:$a)>;
def : MipsPat<(atomic_load_16 addr:$a), (LH addr:$a)>;
def : MipsPat<(atomic_load_32 addr:$a), (LW addr:$a)>;
// Atomic store patterns.
// Likewise, select atomic_store_{8,16,32} to plain store instructions
// operating on 32-bit GPRs.
def : MipsPat<(atomic_store_8 addr:$a, GPR32:$v), (SB GPR32:$v, addr:$a)>;
def : MipsPat<(atomic_store_16 addr:$a, GPR32:$v), (SH GPR32:$v, addr:$a)>;
def : MipsPat<(atomic_store_32 addr:$a, GPR32:$v), (SW GPR32:$v, addr:$a)>;
//===----------------------------------------------------------------------===//
// Floating Point Support
//===----------------------------------------------------------------------===//

View File

@ -1,32 +0,0 @@
; RUN: llc -asm-show-inst -march=mips64el -mcpu=mips64r6 < %s -filetype=asm -o - | FileCheck %s -check-prefix=CHK64
; RUN: llc -asm-show-inst -march=mipsel -mcpu=mips32r6 < %s -filetype=asm -o -| FileCheck %s -check-prefix=CHK32
; Performs an acquire atomic i32 load through a pointer read from an
; (uninitialized) stack slot, staging the result via %.atomicdst.
; The CHK32 lines below expect this to lower to an LL/SC expansion.
define internal i32 @atomic_load_test1() #0 {
entry:
%load_add = alloca i32*, align 8
%.atomicdst = alloca i32, align 4
%0 = load i32*, i32** %load_add, align 8
; The acquire atomic load under test.
%1 = load atomic i32, i32* %0 acquire, align 4
store i32 %1, i32* %.atomicdst, align 4
%2 = load i32, i32* %.atomicdst, align 4
ret i32 %2
}
; Same shape as @atomic_load_test1, but for an acquire atomic i64 load;
; the CHK64 lines below expect the 64-bit LLD/SCD expansion.
define internal i64 @atomic_load_test2() #0 {
entry:
%load_add = alloca i64*, align 16
%.atomicdst = alloca i64, align 8
%0 = load i64*, i64** %load_add, align 16
; The acquire atomic load under test.
%1 = load atomic i64, i64* %0 acquire, align 8
store i64 %1, i64* %.atomicdst, align 8
%2 = load i64, i64* %.atomicdst, align 8
ret i64 %2
}
;CHK32: LL_R6
;CHK32: SC_R6
;CHK64: LLD_R6
;CHK64: SCD_R6

View File

@ -0,0 +1,26 @@
; RUN: llc -asm-show-inst -march=mipsel -mcpu=mips32r6 < %s | \
; RUN: FileCheck %s -check-prefix=CHK32
; RUN: llc -asm-show-inst -march=mips64el -mcpu=mips64r6 < %s | \
; RUN: FileCheck %s -check-prefix=CHK64
@a = common global i32 0, align 4
@b = common global i64 0, align 8
; atomicrmw is a genuine read-modify-write, so it must still be lowered to
; an LL/SC loop; verify the R6 encodings of LL/SC are emitted.
define i32 @ll_sc(i32 signext %x) {
; CHK32-LABEL: ll_sc
;CHK32: LL_R6
;CHK32: SC_R6
%1 = atomicrmw add i32* @a, i32 %x monotonic
ret i32 %1
}
; 64-bit counterpart of @ll_sc: the atomicrmw must lower to an LLD/SCD loop
; using the R6 encodings.
define i64 @lld_scd(i64 signext %x) {
; CHK64-LABEL: lld_scd
;CHK64: LLD_R6
;CHK64: SCD_R6
%1 = atomicrmw add i64* @b, i64 %x monotonic
ret i64 %1
}

View File

@ -0,0 +1,42 @@
; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL
; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL
; RUN: llc -march=mips64 -mcpu=mips64r2 < %s | \
; RUN: FileCheck %s -check-prefix=ALL -check-prefix=M64
; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | \
; RUN: FileCheck %s -check-prefix=ALL -check-prefix=M64
; An acquire atomic i8 load must select to a plain lb followed by a sync
; fence — not an LL/SC sequence.
define i8 @load_i8(i8* %ptr) {
; ALL-LABEL: load_i8
; ALL: lb $2, 0($4)
; ALL: sync
%val = load atomic i8, i8* %ptr acquire, align 1
ret i8 %val
}
; An acquire atomic i16 load must select to a plain lh followed by a sync
; fence — not an LL/SC sequence.
define i16 @load_i16(i16* %ptr) {
; ALL-LABEL: load_i16
; ALL: lh $2, 0($4)
; ALL: sync
%val = load atomic i16, i16* %ptr acquire, align 2
ret i16 %val
}
; An acquire atomic i32 load must select to a plain lw followed by a sync
; fence — not an LL/SC sequence.
define i32 @load_i32(i32* %ptr) {
; ALL-LABEL: load_i32
; ALL: lw $2, 0($4)
; ALL: sync
%val = load atomic i32, i32* %ptr acquire, align 4
ret i32 %val
}
; An acquire atomic i64 load must select to a plain ld followed by a sync
; fence; only checked on 64-bit targets (M64 prefix), since i64 atomics are
; handled differently on 32-bit targets.
define i64 @load_i64(i64* %ptr) {
; M64-LABEL: load_i64
; M64: ld $2, 0($4)
; M64: sync
%val = load atomic i64, i64* %ptr acquire, align 8
ret i64 %val
}

View File

@ -0,0 +1,42 @@
; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL
; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL
; RUN: llc -march=mips64 -mcpu=mips64r2 < %s | \
; RUN: FileCheck %s -check-prefix=ALL -check-prefix=M64
; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | \
; RUN: FileCheck %s -check-prefix=ALL -check-prefix=M64
; A release atomic i8 store must select to a sync fence followed by a plain
; sb — not an LL/SC sequence.
define void @store_i8(i8* %ptr, i8 signext %v) {
; ALL-LABEL: store_i8
; ALL: sync
; ALL: sb $5, 0($4)
store atomic i8 %v, i8* %ptr release, align 1
ret void
}
; A release atomic i16 store must select to a sync fence followed by a plain
; sh — not an LL/SC sequence.
define void @store_i16(i16* %ptr, i16 signext %v) {
; ALL-LABEL: store_i16
; ALL: sync
; ALL: sh $5, 0($4)
store atomic i16 %v, i16* %ptr release, align 2
ret void
}
; A release atomic i32 store must select to a sync fence followed by a plain
; sw — not an LL/SC sequence.
define void @store_i32(i32* %ptr, i32 signext %v) {
; ALL-LABEL: store_i32
; ALL: sync
; ALL: sw $5, 0($4)
store atomic i32 %v, i32* %ptr release, align 4
ret void
}
; A release atomic i64 store must select to a sync fence followed by a plain
; sd; only checked on 64-bit targets (M64 prefix), since i64 atomics are
; handled differently on 32-bit targets.
define void @store_i64(i64* %ptr, i64 %v) {
; M64-LABEL: store_i64
; M64: sync
; M64: sd $5, 0($4)
store atomic i64 %v, i64* %ptr release, align 8
ret void
}