
[WebAssembly] Custom lower i64x2 constant shifts to avoid wrap

Summary: Depends on D53057.

Reviewers: aheejin, dschuff

Subscribers: sbc100, jgravelle-google, sunfish, llvm-commits

Differential Revision: https://reviews.llvm.org/D53251

llvm-svn: 344825
Author: Thomas Lively, 2018-10-20 01:31:18 +00:00
Parent: 32ecb08040
Commit: e840b7a5ec
5 changed files with 81 additions and 3 deletions
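
WebAssembly's SIMD shift instructions take their shift amount as an i32, while LLVM IR gives a <2 x i64> shift a v2i64 amount, so the i64 amount ordinarily has to be wrapped down to i32 (the I32_WRAP_I64 in the existing pattern further down). When the amount is a constant splat that wrap is pure overhead, and this patch folds the truncation into lowering instead. A minimal sketch of the targeted case, with an illustrative function name that is not taken from the patch:

; With this change, the splatted constant is expected to be emitted as a single
; i32.const feeding i64x2.shl, rather than i64.const followed by i32.wrap/i64.
define <2 x i64> @shl_splat_const(<2 x i64> %v) {
  %a = shl <2 x i64> %v, <i64 3, i64 3>
  ret <2 x i64> %a
}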


@@ -22,5 +22,8 @@ HANDLE_NODETYPE(Wrapper)
HANDLE_NODETYPE(BR_IF)
HANDLE_NODETYPE(BR_TABLE)
HANDLE_NODETYPE(SHUFFLE)
HANDLE_NODETYPE(VEC_SHL)
HANDLE_NODETYPE(VEC_SHR_S)
HANDLE_NODETYPE(VEC_SHR_U)
// add memory opcodes starting at ISD::FIRST_TARGET_MEMORY_OPCODE here...


@@ -137,6 +137,11 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    }
  }

  // Custom lowering to avoid having to emit a wrap for 2xi64 constant shifts
  if (Subtarget->hasSIMD128() && EnableUnimplementedWasmSIMDInstrs)
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL})
      setOperationAction(Op, MVT::v2i64, Custom);

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@@ -823,6 +828,10 @@ SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  }
}
@@ -1000,6 +1009,35 @@ WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, MVT::v16i8, Ops);
}

SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  auto *ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!ShiftVec || !ShiftVec->isConstantSplat(SplatValue, SplatUndef,
                                              SplatBitSize, HasAnyUndefs))
    return Op;
  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
    return Op;
  }
  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
                     DAG.getConstant(SplatValue.trunc(32), DL, MVT::i32));
}

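LowerShift only rewrites shifts whose amount is a constant splat; anything else is returned unchanged, so an amount splatted from a register still selects through the pre-existing splat2 pattern (kept in the .td hunk below) and gets wrapped to i32 at run time. A sketch of that fall-through case, with an illustrative function name:

; Not a constant splat: isConstantSplat fails, LowerShift returns the node
; as-is, and instruction selection still emits i32.wrap/i64 for the amount.
define <2 x i64> @shl_splat_reg(<2 x i64> %v, i64 %x) {
  %t = insertelement <2 x i64> undef, i64 %x, i32 0
  %s = shufflevector <2 x i64> %t, <2 x i64> undef, <2 x i32> zeroinitializer
  %a = shl <2 x i64> %v, %s
  ret <2 x i64> %a
}
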
//===----------------------------------------------------------------------===//
// WebAssembly Optimization Hooks
//===----------------------------------------------------------------------===//


@@ -99,6 +99,7 @@ private:
  SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
};

namespace WebAssembly {


@@ -515,6 +515,19 @@ foreach shifts = [[shl, SHL_v2i64], [sra, SHR_S_v2i64], [srl, SHR_U_v2i64]] in
def : Pat<(v2i64 (shifts[0] (v2i64 V128:$vec), (v2i64 (splat2 I64:$x)))),
          (v2i64 (shifts[1] (v2i64 V128:$vec), (I32_WRAP_I64 I64:$x)))>;

// 2xi64 shifts with constant shift amounts are custom lowered to avoid wrapping
def wasm_shift_t : SDTypeProfile<1, 2,
  [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVT<2, i32>]
>;
def wasm_shl : SDNode<"WebAssemblyISD::VEC_SHL", wasm_shift_t>;
def wasm_shr_s : SDNode<"WebAssemblyISD::VEC_SHR_S", wasm_shift_t>;
def wasm_shr_u : SDNode<"WebAssemblyISD::VEC_SHR_U", wasm_shift_t>;
foreach shifts = [[wasm_shl, SHL_v2i64],
                  [wasm_shr_s, SHR_S_v2i64],
                  [wasm_shr_u, SHR_U_v2i64]] in
def : Pat<(v2i64 (shifts[0] (v2i64 V128:$vec), I32:$x)),
          (v2i64 (shifts[1] (v2i64 V128:$vec), I32:$x))>;

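Note on these patterns: LowerShift has already truncated the splat amount to 32 bits (SplatValue.trunc(32)) and wasm_shift_t types the amount operand as i32 (SDTCisVT<2, i32>), so the patterns can feed I32:$x straight into the 64x2 shift instructions; the I32_WRAP_I64 required by the register-splat pattern above is exactly the wrap this commit avoids for constant amounts.
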
//===----------------------------------------------------------------------===//
// Bitwise operations
//===----------------------------------------------------------------------===//


@@ -605,9 +605,8 @@ define <2 x i64> @shl_nozext_v2i64(<2 x i64> %v, i64 %x) {
; NO-SIMD128-NOT: i64x2
; SIMD128-NEXT: .param v128{{$}}
; SIMD128-NEXT: .result v128{{$}}
-; SIMD128-NEXT: i64.const $push[[L0:[0-9]+]]=, 5{{$}}
-; SIMD128-NEXT: i32.wrap/i64 $push[[L1:[0-9]+]]=, $pop[[L0]]{{$}}
-; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $pop[[L1]]{{$}}
+; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}}
+; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <2 x i64> @shl_const_v2i64(<2 x i64> %v) {
%a = shl <2 x i64> %v, <i64 5, i64 5>
@@ -642,6 +641,18 @@ define <2 x i64> @shr_s_nozext_v2i64(<2 x i64> %v, i64 %x) {
  ret <2 x i64> %a
}

; CHECK-LABEL: shr_s_const_v2i64:
; NO-SIMD128-NOT: i64x2
; SIMD128-NEXT: .param v128{{$}}
; SIMD128-NEXT: .result v128{{$}}
; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}}
; SIMD128-NEXT: i64x2.shr_s $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <2 x i64> @shr_s_const_v2i64(<2 x i64> %v) {
  %a = ashr <2 x i64> %v, <i64 5, i64 5>
  ret <2 x i64> %a
}

; CHECK-LABEL: shr_u_v2i64:
; NO-SIMD128-NOT: i64x2
; SIMD128-NEXT: .param v128, i32{{$}}
@@ -670,6 +681,18 @@ define <2 x i64> @shr_u_nozext_v2i64(<2 x i64> %v, i64 %x) {
  ret <2 x i64> %a
}

; CHECK-LABEL: shr_u_const_v2i64:
; NO-SIMD128-NOT: i64x2
; SIMD128-NEXT: .param v128{{$}}
; SIMD128-NEXT: .result v128{{$}}
; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}}
; SIMD128-NEXT: i64x2.shr_u $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <2 x i64> @shr_u_const_v2i64(<2 x i64> %v) {
  %a = lshr <2 x i64> %v, <i64 5, i64 5>
  ret <2 x i64> %a
}

; CHECK-LABEL: and_v2i64:
; NO-SIMD128-NOT: v128
; SIMD128-VM-NOT: v128