[AArch64] Consider all vector types for FeatureSlowMisaligned128Store
The original code considered only v2i64 as slow for this feature. This patch considers all 128-bit vector types as slow candidates. In internal tests, extending this feature to all 128-bit vector types resulted in an overall improvement of 1% on Exynos M1.

Differential revision: https://reviews.llvm.org/D27998

llvm-svn: 291616
parent 6b917afcf9
commit d4b737007e
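The gist of the change: with FeatureSlowMisaligned128Store, getMemoryOpCost now penalizes any misaligned store whose type legalizes to a 128-bit vector, not just v2i64. Below is a minimal standalone sketch of the amortized-cost arithmetic; it is illustrative only, and the struct, function, and parameter names are hypothetical stand-ins rather than the LLVM TTI API. The values it asserts match the costs checked by the updated test (12 for one legalized 128-bit store, 24 when a 256-bit store legalizes into two).

#include <cassert>

// Hypothetical stand-in for the result of type legalization (not the LLVM API).
struct LegalizationCost {
  int NumParts;          // LT.first: number of legal stores the type splits into
  bool Is128BitVector;   // LT.second.is128BitVector()
};

// Sketch of the store-cost heuristic introduced by this patch.
int storeCost(const LegalizationCost &LT, unsigned Alignment,
              bool SlowMisaligned128Store) {
  if (SlowMisaligned128Store && LT.Is128BitVector && Alignment < 16) {
    // Misaligned 128-bit stores are made expensive so that vectorization only
    // pays off when enough other instructions are vectorized alongside them.
    const int AmortizationCost = 6;
    return LT.NumParts * 2 * AmortizationCost;
  }
  return LT.NumParts; // baseline: one unit per legalized store
}

int main() {
  assert(storeCost({1, true}, 8, true) == 12);  // e.g. <2 x i64>, align 8
  assert(storeCost({2, true}, 8, true) == 24);  // e.g. <4 x i64>, split in two
  assert(storeCost({1, true}, 16, true) == 1);  // 16-byte aligned: no penalty
  return 0;
}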
@@ -466,28 +466,27 @@ int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
 }
 
-int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
+int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                     unsigned Alignment, unsigned AddressSpace) {
-  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
+  auto LT = TLI->getTypeLegalizationCost(DL, Ty);
 
   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
-      Src->isVectorTy() && Alignment != 16 &&
-      Src->getVectorElementType()->isIntegerTy(64)) {
-    // Unaligned stores are extremely inefficient. We don't split
-    // unaligned v2i64 stores because the negative impact that has shown in
-    // practice on inlined memcpy code.
-    // We make v2i64 stores expensive so that we will only vectorize if there
+      LT.second.is128BitVector() && Alignment < 16) {
+    // Unaligned stores are extremely inefficient. We don't split all
+    // unaligned 128-bit stores because the negative impact that has shown in
+    // practice on inlined block copy code.
+    // We make such stores expensive so that we will only vectorize if there
     // are 6 other instructions getting vectorized.
-    int AmortizationCost = 6;
+    const int AmortizationCost = 6;
 
     return LT.first * 2 * AmortizationCost;
   }
 
-  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
-      Src->getVectorNumElements() < 8) {
+  if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8) &&
+      Ty->getVectorNumElements() < 8) {
     // We scalarize the loads/stores because there is not v.4b register and we
     // have to promote the elements to v.4h.
-    unsigned NumVecElts = Src->getVectorNumElements();
+    unsigned NumVecElts = Ty->getVectorNumElements();
     unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
     // We generate 2 instructions per vector element.
     return NumVectorizableInstsToAmortize * NumVecElts * 2;
@@ -1,17 +1,59 @@
-; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios | FileCheck %s
-; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown | FileCheck %s
+; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-unknown -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE
 
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
 ; CHECK-LABEL: getMemoryOpCost
 ; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost
 define void @getMemoryOpCost() {
-  ; If FeatureSlowMisaligned128Store is set, we penalize <2 x i64> stores. On
-  ; Cyclone, for example, such stores should be expensive because we don't
-  ; split them and misaligned 16b stores have bad performance.
-  ;
-  ; CHECK: cost of 1 {{.*}} store
-  ; SLOW_MISALIGNED_128_STORE: cost of 12 {{.*}} store
+  ; If FeatureSlowMisaligned128Store is set, we penalize 128-bit stores.
+  ; The unlegalized 256-bit stores are further penalized when legalized down
+  ; to 128-bit stores.
+
+  ; CHECK: cost of 2 for {{.*}} store <4 x i64>
+  ; SLOW_MISALIGNED_128_STORE: cost of 24 for {{.*}} store <4 x i64>
+  store <4 x i64> undef, <4 x i64> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x i32>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x i32>
+  store <8 x i32> undef, <8 x i32> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x i16>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x i16>
+  store <16 x i16> undef, <16 x i16> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <32 x i8>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <32 x i8>
+  store <32 x i8> undef, <32 x i8> * undef
+
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <4 x double>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <4 x double>
+  store <4 x double> undef, <4 x double> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <8 x float>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <8 x float>
+  store <8 x float> undef, <8 x float> * undef
+  ; CHECK-NEXT: cost of 2 for {{.*}} store <16 x half>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 24 for {{.*}} store <16 x half>
+  store <16 x half> undef, <16 x half> * undef
+
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x i64>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x i64>
+  store <2 x i64> undef, <2 x i64> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x i32>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x i32>
+  store <4 x i32> undef, <4 x i32> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x i16>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x i16>
+  store <8 x i16> undef, <8 x i16> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <16 x i8>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <16 x i8>
+  store <16 x i8> undef, <16 x i8> * undef
+
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <2 x double>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <2 x double>
+  store <2 x double> undef, <2 x double> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <4 x float>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <4 x float>
+  store <4 x float> undef, <4 x float> * undef
+  ; CHECK-NEXT: cost of 1 for {{.*}} store <8 x half>
+  ; SLOW_MISALIGNED_128_STORE-NEXT: cost of 12 for {{.*}} store <8 x half>
+  store <8 x half> undef, <8 x half> * undef
+
+  ; We scalarize the loads/stores because there is no vector register name for
+  ; these types (they get extended to v.4h/v.2s).