
Use movlps / movhps to modify low / high half of 16-byte memory location.

llvm-svn: 51501
Evan Cheng 2008-05-23 21:23:16 +00:00
parent 2412469191
commit 4f660778f0
4 changed files with 94 additions and 62 deletions
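
For context: movlps and movhps load or store only the low or high 8 bytes of a 16-byte memory location, leaving the other half untouched. A minimal intrinsics sketch of the store side (the function names are illustrative, not part of this commit):

#include <xmmintrin.h>

/* Write only the low 8 bytes of v to p (a movlps store); the upper
   8 bytes of the 16-byte location at p are left untouched. */
void store_low_half(void *p, __m128 v) {
  _mm_storel_pi((__m64 *)p, v);
}

/* Write only the high 8 bytes of v to p (a movhps store). */
void store_high_half(void *p, __m128 v) {
  _mm_storeh_pi((__m64 *)p, v);
}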


@@ -505,46 +505,6 @@ nodes which are selected to max / min instructions that are marked commutable.
//===---------------------------------------------------------------------===//
-We should compile this:
-#include <xmmintrin.h>
-typedef union {
-int i[4];
-float f[4];
-__m128 v;
-} vector4_t;
-void swizzle (const void *a, vector4_t * b, vector4_t * c) {
-b->v = _mm_loadl_pi (b->v, (__m64 *) a);
-c->v = _mm_loadl_pi (c->v, ((__m64 *) a) + 1);
-}
-to:
-_swizzle:
-movl 4(%esp), %eax
-movl 8(%esp), %edx
-movl 12(%esp), %ecx
-movlps (%eax), %xmm0
-movlps %xmm0, (%edx)
-movlps 8(%eax), %xmm0
-movlps %xmm0, (%ecx)
-ret
-not:
-swizzle:
-movl 8(%esp), %eax
-movaps (%eax), %xmm0
-movl 4(%esp), %ecx
-movlps (%ecx), %xmm0
-movaps %xmm0, (%eax)
-movl 12(%esp), %eax
-movaps (%eax), %xmm0
-movlps 8(%ecx), %xmm0
-movaps %xmm0, (%eax)
-ret
//===---------------------------------------------------------------------===//
We should materialize vector constants like "all ones" and "signbit" with
code like:


@@ -363,6 +363,32 @@ static void MoveBelowTokenFactor(SelectionDAG &DAG, SDOperand Load,
Store.getOperand(2), Store.getOperand(3));
}
+/// isRMWLoad - Return true if N is a load that's part of RMW sub-DAG.
+///
+static bool isRMWLoad(SDOperand N, SDOperand Chain, SDOperand Address,
+SDOperand &Load) {
+if (N.getOpcode() == ISD::BIT_CONVERT)
+N = N.getOperand(0);
+LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
+if (!LD || LD->isVolatile())
+return false;
+if (LD->getAddressingMode() != ISD::UNINDEXED)
+return false;
+ISD::LoadExtType ExtType = LD->getExtensionType();
+if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
+return false;
+if (N.hasOneUse() &&
+N.getOperand(1) == Address &&
+N.Val->isOperandOf(Chain.Val)) {
+Load = N;
+return true;
+}
+return false;
+}
/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
@@ -414,8 +440,8 @@ void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
SDOperand N1 = I->getOperand(1);
SDOperand N2 = I->getOperand(2);
-if (MVT::isFloatingPoint(N1.getValueType()) ||
-MVT::isVector(N1.getValueType()) ||
+if ((MVT::isFloatingPoint(N1.getValueType()) &&
+!MVT::isVector(N1.getValueType())) ||
!N1.hasOneUse())
continue;
@@ -429,20 +455,13 @@ void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
case ISD::OR:
case ISD::XOR:
case ISD::ADDC:
-case ISD::ADDE: {
+case ISD::ADDE:
+case ISD::VECTOR_SHUFFLE: {
SDOperand N10 = N1.getOperand(0);
SDOperand N11 = N1.getOperand(1);
-if (ISD::isNON_EXTLoad(N10.Val))
-RModW = true;
-else if (ISD::isNON_EXTLoad(N11.Val)) {
-RModW = true;
-std::swap(N10, N11);
-}
-RModW = RModW && N10.Val->isOperandOf(Chain.Val) && N10.hasOneUse() &&
-(N10.getOperand(1) == N2) &&
-(N10.Val->getValueType(0) == N1.getValueType());
-if (RModW)
-Load = N10;
+RModW = isRMWLoad(N10, Chain, N2, Load);
+if (!RModW)
+RModW = isRMWLoad(N11, Chain, N2, Load);
break;
}
case ISD::SUB:
@@ -456,12 +475,7 @@ void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
case X86ISD::SHLD:
case X86ISD::SHRD: {
SDOperand N10 = N1.getOperand(0);
-if (ISD::isNON_EXTLoad(N10.Val))
-RModW = N10.Val->isOperandOf(Chain.Val) && N10.hasOneUse() &&
-(N10.getOperand(1) == N2) &&
-(N10.Val->getValueType(0) == N1.getValueType());
-if (RModW)
-Load = N10;
+RModW = isRMWLoad(N10, Chain, N2, Load);
break;
}
}
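
PreprocessForRMW, with the isRMWLoad helper above, rewrites the DAG so that a load whose result is combined with another value and stored back to the same address can be selected as a single memory-operand instruction; adding ISD::VECTOR_SHUFFLE to the accepted opcodes extends this to the half-vector merges handled below. A scalar sketch of the read-modify-write shape being targeted (illustrative only, not code from this commit):

/* A read-modify-write candidate: load *p, OR in a bit, store back to *p.
   With RMW folding the selector can emit a single "orl $1, (mem)" instead
   of a separate load, or, and store. */
void set_low_bit(int *p) {
  *p |= 1;
}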


@@ -2977,13 +2977,15 @@ def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memop addr:$src2),
MOVHP_shuffle_mask)),
(MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)),
+def : Pat<(v4i32 (vector_shuffle VR128:$src1,
+(bc_v4i32 (memopv2i64 addr:$src2)),
MOVLP_shuffle_mask)),
(MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
MOVLP_shuffle_mask)),
(MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
-def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)),
+def : Pat<(v4i32 (vector_shuffle VR128:$src1,
+(bc_v4i32 (memopv2i64 addr:$src2)),
MOVHP_shuffle_mask)),
(MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
@@ -2991,6 +2993,37 @@ def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
(MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
}
+// (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
+// (store (vector_shuffle (load addr), v2, <0, 1, 4, 5>), addr) using MOVHPS
+def : Pat<(store (v4f32 (vector_shuffle (memop addr:$src1), VR128:$src2,
+MOVLP_shuffle_mask)), addr:$src1),
+(MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
+def : Pat<(store (v2f64 (vector_shuffle (memop addr:$src1), VR128:$src2,
+MOVLP_shuffle_mask)), addr:$src1),
+(MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+def : Pat<(store (v4f32 (vector_shuffle (memop addr:$src1), VR128:$src2,
+MOVHP_shuffle_mask)), addr:$src1),
+(MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
+def : Pat<(store (v2f64 (vector_shuffle (memop addr:$src1), VR128:$src2,
+MOVHP_shuffle_mask)), addr:$src1),
+(MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+def : Pat<(store (v4i32 (vector_shuffle
+(bc_v4i32 (memopv2i64 addr:$src1)), VR128:$src2,
+MOVLP_shuffle_mask)), addr:$src1),
+(MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
+def : Pat<(store (v2i64 (vector_shuffle (memop addr:$src1), VR128:$src2,
+MOVLP_shuffle_mask)), addr:$src1),
+(MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
+def : Pat<(store (v4i32 (vector_shuffle
+(bc_v4i32 (memopv2i64 addr:$src1)), VR128:$src2,
+MOVHP_shuffle_mask)), addr:$src1),
+(MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
+def : Pat<(store (v2i64 (vector_shuffle (memop addr:$src1), VR128:$src2,
+MOVHP_shuffle_mask)), addr:$src1),
+(MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,

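The store patterns added above match a full-vector load, a MOVLP/MOVHP-style shuffle that swaps in one half from a register, and a store back to the same address, and select a single half-width store instead. Roughly, they target source of this shape (a sketch with illustrative names; dst is assumed 16-byte aligned):

#include <xmmintrin.h>

/* Replace only the low 8 bytes of the 16-byte location at dst with the low
   half of v; with the MOVLPSmr pattern this can become one movlps store. */
void replace_low_half(float *dst, __m128 v) {
  __m128 m = _mm_load_ps(dst);                        /* load all 16 bytes        */
  m = _mm_shuffle_ps(v, m, _MM_SHUFFLE(3, 2, 1, 0));  /* {v[0], v[1], m[2], m[3]} */
  _mm_store_ps(dst, m);                               /* store 16 bytes back      */
}
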

@@ -0,0 +1,25 @@
; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | grep mov | count 7
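; Each store below should replace only the low 8 bytes of a 16-byte location
; with 8 bytes loaded from %a, so 7 moves should suffice: presumably the three
; argument loads (movl) plus four half-width vector moves (movlps-style loads
; from %a and stores into %b and %c).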
%struct.vector4_t = type { <4 x float> }
define void @swizzle(i8* %a, %struct.vector4_t* %b, %struct.vector4_t* %c) nounwind {
entry:
%tmp9 = getelementptr %struct.vector4_t* %b, i32 0, i32 0 ; <<4 x float>*> [#uses=2]
%tmp10 = load <4 x float>* %tmp9, align 16 ; <<4 x float>> [#uses=1]
%tmp14 = bitcast i8* %a to double* ; <double*> [#uses=1]
%tmp15 = load double* %tmp14 ; <double> [#uses=1]
%tmp16 = insertelement <2 x double> undef, double %tmp15, i32 0 ; <<2 x double>> [#uses=1]
%tmp18 = bitcast <2 x double> %tmp16 to <4 x float> ; <<4 x float>> [#uses=1]
%tmp19 = shufflevector <4 x float> %tmp10, <4 x float> %tmp18, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x float>> [#uses=1]
store <4 x float> %tmp19, <4 x float>* %tmp9, align 16
%tmp28 = getelementptr %struct.vector4_t* %c, i32 0, i32 0 ; <<4 x float>*> [#uses=2]
%tmp29 = load <4 x float>* %tmp28, align 16 ; <<4 x float>> [#uses=1]
%tmp26 = getelementptr i8* %a, i32 8 ; <i8*> [#uses=1]
%tmp33 = bitcast i8* %tmp26 to double* ; <double*> [#uses=1]
%tmp34 = load double* %tmp33 ; <double> [#uses=1]
%tmp35 = insertelement <2 x double> undef, double %tmp34, i32 0 ; <<2 x double>> [#uses=1]
%tmp37 = bitcast <2 x double> %tmp35 to <4 x float> ; <<4 x float>> [#uses=1]
%tmp38 = shufflevector <4 x float> %tmp29, <4 x float> %tmp37, <4 x i32> < i32 4, i32 5, i32 2, i32 3 > ; <<4 x float>> [#uses=1]
store <4 x float> %tmp38, <4 x float>* %tmp28, align 16
ret void
}