1
0
mirror of https://github.com/RPCS3/llvm-mirror.git synced 2025-01-31 12:41:49 +01:00

[SVE][MVT] Fixed-length vector MVT ranges

* Reordered MVT simple types to group scalable vector types
    together.
  * New range functions in MachineValueType.h to only iterate over
    the fixed-length int/fp vector types.
  * Stopped backends which don't support scalable vector types from
    iterating over scalable types.

Reviewers: sdesmalen, greened

Reviewed By: greened

Differential Revision: https://reviews.llvm.org/D66339

llvm-svn: 372099
This commit is contained in:
Graham Hunter 2019-09-17 10:19:23 +00:00
parent 0cc8fb27ad
commit 15543f7650
18 changed files with 202 additions and 167 deletions

View File

@ -87,65 +87,65 @@ def v32i64 : ValueType<2048,61>; // 32 x i64 vector value
def v1i128 : ValueType<128, 62>; // 1 x i128 vector value
def nxv1i1 : ValueType<1, 63>; // n x 1 x i1 vector value
def nxv2i1 : ValueType<2, 64>; // n x 2 x i1 vector value
def nxv4i1 : ValueType<4, 65>; // n x 4 x i1 vector value
def nxv8i1 : ValueType<8, 66>; // n x 8 x i1 vector value
def nxv16i1 : ValueType<16, 67>; // n x 16 x i1 vector value
def nxv32i1 : ValueType<32, 68>; // n x 32 x i1 vector value
def v2f16 : ValueType<32 , 63>; // 2 x f16 vector value
def v3f16 : ValueType<48 , 64>; // 3 x f16 vector value
def v4f16 : ValueType<64 , 65>; // 4 x f16 vector value
def v8f16 : ValueType<128, 66>; // 8 x f16 vector value
def v16f16 : ValueType<256, 67>; // 16 x f16 vector value
def v32f16 : ValueType<512, 68>; // 32 x f16 vector value
def v1f32 : ValueType<32 , 69>; // 1 x f32 vector value
def v2f32 : ValueType<64 , 70>; // 2 x f32 vector value
def v3f32 : ValueType<96 , 71>; // 3 x f32 vector value
def v4f32 : ValueType<128, 72>; // 4 x f32 vector value
def v5f32 : ValueType<160, 73>; // 5 x f32 vector value
def v8f32 : ValueType<256, 74>; // 8 x f32 vector value
def v16f32 : ValueType<512, 75>; // 16 x f32 vector value
def v32f32 : ValueType<1024, 76>; // 32 x f32 vector value
def v64f32 : ValueType<2048, 77>; // 64 x f32 vector value
def v128f32 : ValueType<4096, 78>; // 128 x f32 vector value
def v256f32 : ValueType<8182, 79>; // 256 x f32 vector value
def v512f32 : ValueType<16384, 80>; // 512 x f32 vector value
def v1024f32 : ValueType<32768, 81>; // 1024 x f32 vector value
def v2048f32 : ValueType<65536, 82>; // 2048 x f32 vector value
def v1f64 : ValueType<64, 83>; // 1 x f64 vector value
def v2f64 : ValueType<128, 84>; // 2 x f64 vector value
def v4f64 : ValueType<256, 85>; // 4 x f64 vector value
def v8f64 : ValueType<512, 86>; // 8 x f64 vector value
def nxv1i8 : ValueType<8, 69>; // n x 1 x i8 vector value
def nxv2i8 : ValueType<16, 70>; // n x 2 x i8 vector value
def nxv4i8 : ValueType<32, 71>; // n x 4 x i8 vector value
def nxv8i8 : ValueType<64, 72>; // n x 8 x i8 vector value
def nxv16i8 : ValueType<128, 73>; // n x 16 x i8 vector value
def nxv32i8 : ValueType<256, 74>; // n x 32 x i8 vector value
def nxv1i1 : ValueType<1, 87>; // n x 1 x i1 vector value
def nxv2i1 : ValueType<2, 88>; // n x 2 x i1 vector value
def nxv4i1 : ValueType<4, 89>; // n x 4 x i1 vector value
def nxv8i1 : ValueType<8, 90>; // n x 8 x i1 vector value
def nxv16i1 : ValueType<16, 91>; // n x 16 x i1 vector value
def nxv32i1 : ValueType<32, 92>; // n x 32 x i1 vector value
def nxv1i16 : ValueType<16, 75>; // n x 1 x i16 vector value
def nxv2i16 : ValueType<32, 76>; // n x 2 x i16 vector value
def nxv4i16 : ValueType<64, 77>; // n x 4 x i16 vector value
def nxv8i16 : ValueType<128, 78>; // n x 8 x i16 vector value
def nxv16i16: ValueType<256, 79>; // n x 16 x i16 vector value
def nxv32i16: ValueType<512, 80>; // n x 32 x i16 vector value
def nxv1i8 : ValueType<8, 93>; // n x 1 x i8 vector value
def nxv2i8 : ValueType<16, 94>; // n x 2 x i8 vector value
def nxv4i8 : ValueType<32, 95>; // n x 4 x i8 vector value
def nxv8i8 : ValueType<64, 96>; // n x 8 x i8 vector value
def nxv16i8 : ValueType<128, 97>; // n x 16 x i8 vector value
def nxv32i8 : ValueType<256, 98>; // n x 32 x i8 vector value
def nxv1i32 : ValueType<32, 81>; // n x 1 x i32 vector value
def nxv2i32 : ValueType<64, 82>; // n x 2 x i32 vector value
def nxv4i32 : ValueType<128, 83>; // n x 4 x i32 vector value
def nxv8i32 : ValueType<256, 84>; // n x 8 x i32 vector value
def nxv16i32: ValueType<512, 85>; // n x 16 x i32 vector value
def nxv32i32: ValueType<1024,86>; // n x 32 x i32 vector value
def nxv1i16 : ValueType<16, 99>; // n x 1 x i16 vector value
def nxv2i16 : ValueType<32, 100>; // n x 2 x i16 vector value
def nxv4i16 : ValueType<64, 101>; // n x 4 x i16 vector value
def nxv8i16 : ValueType<128, 102>; // n x 8 x i16 vector value
def nxv16i16: ValueType<256, 103>; // n x 16 x i16 vector value
def nxv32i16: ValueType<512, 104>; // n x 32 x i16 vector value
def nxv1i64 : ValueType<64, 87>; // n x 1 x i64 vector value
def nxv2i64 : ValueType<128, 88>; // n x 2 x i64 vector value
def nxv4i64 : ValueType<256, 89>; // n x 4 x i64 vector value
def nxv8i64 : ValueType<512, 90>; // n x 8 x i64 vector value
def nxv16i64: ValueType<1024,91>; // n x 16 x i64 vector value
def nxv32i64: ValueType<2048,92>; // n x 32 x i64 vector value
def nxv1i32 : ValueType<32, 105>; // n x 1 x i32 vector value
def nxv2i32 : ValueType<64, 106>; // n x 2 x i32 vector value
def nxv4i32 : ValueType<128, 107>; // n x 4 x i32 vector value
def nxv8i32 : ValueType<256, 108>; // n x 8 x i32 vector value
def nxv16i32: ValueType<512, 109>; // n x 16 x i32 vector value
def nxv32i32: ValueType<1024,110>; // n x 32 x i32 vector value
def v2f16 : ValueType<32 , 93>; // 2 x f16 vector value
def v3f16 : ValueType<48 , 94>; // 3 x f16 vector value
def v4f16 : ValueType<64 , 95>; // 4 x f16 vector value
def v8f16 : ValueType<128, 96>; // 8 x f16 vector value
def v16f16 : ValueType<256, 97>; // 16 x f16 vector value
def v32f16 : ValueType<512, 98>; // 32 x f16 vector value
def v1f32 : ValueType<32 , 99>; // 1 x f32 vector value
def v2f32 : ValueType<64 , 100>; // 2 x f32 vector value
def v3f32 : ValueType<96 , 101>; // 3 x f32 vector value
def v4f32 : ValueType<128, 102>; // 4 x f32 vector value
def v5f32 : ValueType<160, 103>; // 5 x f32 vector value
def v8f32 : ValueType<256, 104>; // 8 x f32 vector value
def v16f32 : ValueType<512, 105>; // 16 x f32 vector value
def v32f32 : ValueType<1024, 106>; // 32 x f32 vector value
def v64f32 : ValueType<2048, 107>; // 64 x f32 vector value
def v128f32 : ValueType<4096, 108>; // 128 x f32 vector value
def v256f32 : ValueType<8182, 109>; // 256 x f32 vector value
def v512f32 : ValueType<16384, 110>; // 512 x f32 vector value
def v1024f32 : ValueType<32768, 111>; // 1024 x f32 vector value
def v2048f32 : ValueType<65536, 112>; // 2048 x f32 vector value
def v1f64 : ValueType<64, 113>; // 1 x f64 vector value
def v2f64 : ValueType<128, 114>; // 2 x f64 vector value
def v4f64 : ValueType<256, 115>; // 4 x f64 vector value
def v8f64 : ValueType<512, 116>; // 8 x f64 vector value
def nxv1i64 : ValueType<64, 111>; // n x 1 x i64 vector value
def nxv2i64 : ValueType<128, 112>; // n x 2 x i64 vector value
def nxv4i64 : ValueType<256, 113>; // n x 4 x i64 vector value
def nxv8i64 : ValueType<512, 114>; // n x 8 x i64 vector value
def nxv16i64: ValueType<1024,115>; // n x 16 x i64 vector value
def nxv32i64: ValueType<2048,116>; // n x 32 x i64 vector value
def nxv2f16 : ValueType<32 , 117>; // n x 2 x f16 vector value
def nxv4f16 : ValueType<64 , 118>; // n x 4 x f16 vector value

View File

@ -112,72 +112,77 @@ namespace llvm {
v1i128 = 62, // 1 x i128
// Scalable integer types
nxv1i1 = 63, // n x 1 x i1
nxv2i1 = 64, // n x 2 x i1
nxv4i1 = 65, // n x 4 x i1
nxv8i1 = 66, // n x 8 x i1
nxv16i1 = 67, // n x 16 x i1
nxv32i1 = 68, // n x 32 x i1
FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i128,
nxv1i8 = 69, // n x 1 x i8
nxv2i8 = 70, // n x 2 x i8
nxv4i8 = 71, // n x 4 x i8
nxv8i8 = 72, // n x 8 x i8
nxv16i8 = 73, // n x 16 x i8
nxv32i8 = 74, // n x 32 x i8
v2f16 = 63, // 2 x f16
v3f16 = 64, // 3 x f16
v4f16 = 65, // 4 x f16
v8f16 = 66, // 8 x f16
v16f16 = 67, // 16 x f16
v32f16 = 68, // 32 x f16
v1f32 = 69, // 1 x f32
v2f32 = 70, // 2 x f32
v3f32 = 71, // 3 x f32
v4f32 = 72, // 4 x f32
v5f32 = 73, // 5 x f32
v8f32 = 74, // 8 x f32
v16f32 = 75, // 16 x f32
v32f32 = 76, // 32 x f32
v64f32 = 77, // 64 x f32
v128f32 = 78, // 128 x f32
v256f32 = 79, // 256 x f32
v512f32 = 80, // 512 x f32
v1024f32 = 81, // 1024 x f32
v2048f32 = 82, // 2048 x f32
v1f64 = 83, // 1 x f64
v2f64 = 84, // 2 x f64
v4f64 = 85, // 4 x f64
v8f64 = 86, // 8 x f64
nxv1i16 = 75, // n x 1 x i16
nxv2i16 = 76, // n x 2 x i16
nxv4i16 = 77, // n x 4 x i16
nxv8i16 = 78, // n x 8 x i16
nxv16i16 = 79, // n x 16 x i16
nxv32i16 = 80, // n x 32 x i16
FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE = v2f16,
LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v8f64,
nxv1i32 = 81, // n x 1 x i32
nxv2i32 = 82, // n x 2 x i32
nxv4i32 = 83, // n x 4 x i32
nxv8i32 = 84, // n x 8 x i32
nxv16i32 = 85, // n x 16 x i32
nxv32i32 = 86, // n x 32 x i32
FIRST_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
LAST_FIXEDLEN_VECTOR_VALUETYPE = v8f64,
nxv1i64 = 87, // n x 1 x i64
nxv2i64 = 88, // n x 2 x i64
nxv4i64 = 89, // n x 4 x i64
nxv8i64 = 90, // n x 8 x i64
nxv16i64 = 91, // n x 16 x i64
nxv32i64 = 92, // n x 32 x i64
nxv1i1 = 87, // n x 1 x i1
nxv2i1 = 88, // n x 2 x i1
nxv4i1 = 89, // n x 4 x i1
nxv8i1 = 90, // n x 8 x i1
nxv16i1 = 91, // n x 16 x i1
nxv32i1 = 92, // n x 32 x i1
FIRST_INTEGER_VECTOR_VALUETYPE = v1i1,
LAST_INTEGER_VECTOR_VALUETYPE = nxv32i64,
nxv1i8 = 93, // n x 1 x i8
nxv2i8 = 94, // n x 2 x i8
nxv4i8 = 95, // n x 4 x i8
nxv8i8 = 96, // n x 8 x i8
nxv16i8 = 97, // n x 16 x i8
nxv32i8 = 98, // n x 32 x i8
FIRST_INTEGER_SCALABLE_VALUETYPE = nxv1i1,
LAST_INTEGER_SCALABLE_VALUETYPE = nxv32i64,
nxv1i16 = 99, // n x 1 x i16
nxv2i16 = 100, // n x 2 x i16
nxv4i16 = 101, // n x 4 x i16
nxv8i16 = 102, // n x 8 x i16
nxv16i16 = 103, // n x 16 x i16
nxv32i16 = 104, // n x 32 x i16
v2f16 = 93, // 2 x f16
v3f16 = 94, // 3 x f16
v4f16 = 95, // 4 x f16
v8f16 = 96, // 8 x f16
v16f16 = 97, // 16 x f16
v32f16 = 98, // 32 x f16
v1f32 = 99, // 1 x f32
v2f32 = 100, // 2 x f32
v3f32 = 101, // 3 x f32
v4f32 = 102, // 4 x f32
v5f32 = 103, // 5 x f32
v8f32 = 104, // 8 x f32
v16f32 = 105, // 16 x f32
v32f32 = 106, // 32 x f32
v64f32 = 107, // 64 x f32
v128f32 = 108, // 128 x f32
v256f32 = 109, // 256 x f32
v512f32 = 110, // 512 x f32
v1024f32 = 111, // 1024 x f32
v2048f32 = 112, // 2048 x f32
v1f64 = 113, // 1 x f64
v2f64 = 114, // 2 x f64
v4f64 = 115, // 4 x f64
v8f64 = 116, // 8 x f64
nxv1i32 = 105, // n x 1 x i32
nxv2i32 = 106, // n x 2 x i32
nxv4i32 = 107, // n x 4 x i32
nxv8i32 = 108, // n x 8 x i32
nxv16i32 = 109, // n x 16 x i32
nxv32i32 = 110, // n x 32 x i32
nxv1i64 = 111, // n x 1 x i64
nxv2i64 = 112, // n x 2 x i64
nxv4i64 = 113, // n x 4 x i64
nxv8i64 = 114, // n x 8 x i64
nxv16i64 = 115, // n x 16 x i64
nxv32i64 = 116, // n x 32 x i64
FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv32i64,
nxv2f16 = 117, // n x 2 x f16
nxv4f16 = 118, // n x 4 x f16
@ -192,11 +197,11 @@ namespace llvm {
nxv4f64 = 127, // n x 4 x f64
nxv8f64 = 128, // n x 8 x f64
FIRST_FP_VECTOR_VALUETYPE = v2f16,
LAST_FP_VECTOR_VALUETYPE = nxv8f64,
FIRST_FP_SCALABLE_VECTOR_VALUETYPE = nxv2f16,
LAST_FP_SCALABLE_VECTOR_VALUETYPE = nxv8f64,
FIRST_FP_SCALABLE_VALUETYPE = nxv2f16,
LAST_FP_SCALABLE_VALUETYPE = nxv8f64,
FIRST_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
LAST_SCALABLE_VECTOR_VALUETYPE = nxv8f64,
FIRST_VECTOR_VALUETYPE = v1i1,
LAST_VECTOR_VALUETYPE = nxv8f64,
@ -278,16 +283,20 @@ namespace llvm {
bool isFloatingPoint() const {
return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
(SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
(SimpleTy >= MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE) ||
(SimpleTy >= MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE));
}
/// Return true if this is an integer or a vector integer type.
bool isInteger() const {
return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
(SimpleTy >= MVT::FIRST_INTEGER_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VECTOR_VALUETYPE));
(SimpleTy >= MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE) ||
(SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE));
}
/// Return true if this is an integer, not including vectors.
@ -305,10 +314,13 @@ namespace llvm {
/// Return true if this is a vector value type where the
/// runtime length is machine dependent
bool isScalableVector() const {
return ((SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VALUETYPE) ||
(SimpleTy >= MVT::FIRST_FP_SCALABLE_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_SCALABLE_VALUETYPE));
return (SimpleTy >= MVT::FIRST_SCALABLE_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_SCALABLE_VECTOR_VALUETYPE);
}
bool isFixedLengthVector() const {
return (SimpleTy >= MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE);
}
/// Return true if this is a 16-bit vector type.
@ -1104,26 +1116,40 @@ namespace llvm {
(MVT::SimpleValueType)(MVT::LAST_VECTOR_VALUETYPE + 1));
}
static mvt_range integer_vector_valuetypes() {
static mvt_range fixedlen_vector_valuetypes() {
return mvt_range(
MVT::FIRST_INTEGER_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_INTEGER_VECTOR_VALUETYPE + 1));
MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE + 1));
}
static mvt_range fp_vector_valuetypes() {
static mvt_range scalable_vector_valuetypes() {
return mvt_range(
MVT::FIRST_FP_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_FP_VECTOR_VALUETYPE + 1));
MVT::FIRST_SCALABLE_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_SCALABLE_VECTOR_VALUETYPE + 1));
}
static mvt_range integer_fixedlen_vector_valuetypes() {
return mvt_range(
MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE + 1));
}
static mvt_range fp_fixedlen_vector_valuetypes() {
return mvt_range(
MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE + 1));
}
static mvt_range integer_scalable_vector_valuetypes() {
return mvt_range(MVT::FIRST_INTEGER_SCALABLE_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_INTEGER_SCALABLE_VALUETYPE + 1));
return mvt_range(
MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE + 1));
}
static mvt_range fp_scalable_vector_valuetypes() {
return mvt_range(MVT::FIRST_FP_SCALABLE_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_FP_SCALABLE_VALUETYPE + 1));
return mvt_range(
MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE,
(MVT::SimpleValueType)(MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE + 1));
}
/// @}
};

View File

@ -1266,7 +1266,8 @@ void TargetLoweringBase::computeRegisterProperties(
case TypePromoteInteger:
// Try to promote the elements of integer vectors. If no legal
// promotion was found, fall through to the widen-vector method.
for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
for (unsigned nVT = i + 1;
nVT <= MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE; ++nVT) {
MVT SVT = (MVT::SimpleValueType) nVT;
// Promote vectors of integers to vectors with the same number
// of elements, with a wider element type.

View File

@ -307,7 +307,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
// AArch64 lacks both left-rotate and popcount instructions.
setOperationAction(ISD::ROTL, MVT::i32, Expand);
setOperationAction(ISD::ROTL, MVT::i64, Expand);
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
}
@ -321,7 +321,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
setOperationAction(ISD::SDIVREM, VT, Expand);
setOperationAction(ISD::UDIVREM, VT, Expand);
}
@ -754,7 +754,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
// Likewise, narrowing and extending vector loads/stores aren't handled
// directly.
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
@ -770,7 +770,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
for (MVT InnerVT : MVT::vector_valuetypes()) {
for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
setTruncStoreAction(VT, InnerVT, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);

View File

@ -134,7 +134,7 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
}
for (MVT VT : MVT::integer_vector_valuetypes()) {
for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);

View File

@ -704,8 +704,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
}
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT InnerVT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
setTruncStoreAction(VT, InnerVT, Expand);
addAllExtLoads(VT, InnerVT, Expand);
}
@ -910,7 +910,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// It is legal to extload from v4i8 to v4i16 or v4i32.
for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
MVT::v2i32}) {
for (MVT VT : MVT::integer_vector_valuetypes()) {
for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
@ -1054,7 +1054,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
// ARM does not have ROTL.
setOperationAction(ISD::ROTL, MVT::i32, Expand);
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
}

View File

@ -1439,12 +1439,12 @@ HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE
};
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
for (unsigned VectExpOp : VectExpOps)
setOperationAction(VectExpOp, VT, Expand);
// Expand all extending loads and truncating stores:
for (MVT TargetVT : MVT::vector_valuetypes()) {
for (MVT TargetVT : MVT::fixedlen_vector_valuetypes()) {
if (TargetVT == VT)
continue;
setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
@ -1864,7 +1864,7 @@ bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask,
TargetLoweringBase::LegalizeTypeAction
HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {
if (VT.getVectorNumElements() == 1)
if (VT.getVectorNumElements() == 1 || VT.isScalableVector())
return TargetLoweringBase::TypeScalarizeVector;
// Always widen vectors of i1.

View File

@ -228,7 +228,7 @@ public:
}
bool isHVXVectorType(MVT VecTy, bool IncludeBool = false) const {
if (!VecTy.isVector() || !useHVXOps())
if (!VecTy.isVector() || !useHVXOps() || VecTy.isScalableVector())
return false;
MVT ElemTy = VecTy.getVectorElementType();
if (!IncludeBool && ElemTy == MVT::i1)

View File

@ -45,6 +45,8 @@ bool HexagonTTIImpl::useHVX() const {
bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
assert(VecTy->isVectorTy());
if (cast<VectorType>(VecTy)->isScalable())
return false;
// Avoid types like <2 x i32*>.
if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
return false;

View File

@ -330,7 +330,7 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
}
// Set LoadExtAction for f16 vectors to Expand
for (MVT VT : MVT::fp_vector_valuetypes()) {
for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
if (F16VT.isValid())
setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);

View File

@ -71,8 +71,8 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
if (Subtarget.hasDSP() || Subtarget.hasMSA()) {
// Expand all truncating stores and extending loads.
for (MVT VT0 : MVT::vector_valuetypes()) {
for (MVT VT1 : MVT::vector_valuetypes()) {
for (MVT VT0 : MVT::fixedlen_vector_valuetypes()) {
for (MVT VT1 : MVT::fixedlen_vector_valuetypes()) {
setTruncStoreAction(VT0, VT1, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT0, VT1, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT0, VT1, Expand);

View File

@ -480,7 +480,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setOperationAction(ISD::TRAP, MVT::Other, Legal);
// Register custom handling for vector loads/stores
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
if (IsPTXVectorType(VT)) {
setOperationAction(ISD::LOAD, VT, Custom);
setOperationAction(ISD::STORE, VT, Custom);

View File

@ -551,7 +551,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
if (Subtarget.hasAltivec()) {
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
// add/sub are legal for all supported vector VT's.
setOperationAction(ISD::ADD, VT, Legal);
setOperationAction(ISD::SUB, VT, Legal);
@ -652,7 +652,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
for (MVT InnerVT : MVT::vector_valuetypes()) {
for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
setTruncStoreAction(VT, InnerVT, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);

View File

@ -1438,7 +1438,7 @@ SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
setOperationAction(Op, MVT::v2i32, Expand);
}
// Truncating/extending stores/loads are also not supported.
for (MVT VT : MVT::integer_vector_valuetypes()) {
for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);

View File

@ -300,14 +300,14 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
// Handle prefetches with PFD or PFDRL.
setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
// Assume by default that all vector operations need to be expanded.
for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
if (getOperationAction(Opcode, VT) == Legal)
setOperationAction(Opcode, VT, Expand);
// Likewise all truncating stores and extending loads.
for (MVT InnerVT : MVT::vector_valuetypes()) {
for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
setTruncStoreAction(VT, InnerVT, Expand);
setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
@ -333,7 +333,7 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
}
// Handle integer vector types.
for (MVT VT : MVT::integer_vector_valuetypes()) {
for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
if (isTypeLegal(VT)) {
// These operations have direct equivalents.
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);

View File

@ -205,7 +205,7 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
for (auto T : {MVT::i8, MVT::i16, MVT::i32})
setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
}
for (auto T : MVT::integer_vector_valuetypes())
for (auto T : MVT::integer_fixedlen_vector_valuetypes())
setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
// Dynamic stack allocation: use the default expansion.
@ -237,7 +237,7 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
if (Subtarget->hasSIMD128()) {
for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
MVT::v2f64}) {
for (auto MemT : MVT::vector_valuetypes()) {
for (auto MemT : MVT::fixedlen_vector_valuetypes()) {
if (MVT(T) != MemT) {
setTruncStoreAction(T, MemT, Expand);
for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})

View File

@ -749,7 +749,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// First set operation action for all vector types to either promote
// (for widening) or expand (for scalarization). Then we will selectively
// turn on ones that can be effectively codegen'd.
for (MVT VT : MVT::vector_valuetypes()) {
for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
@ -787,7 +787,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
setOperationAction(ISD::ANY_EXTEND, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
for (MVT InnerVT : MVT::vector_valuetypes()) {
for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
setTruncStoreAction(InnerVT, VT, Expand);
setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
@ -948,7 +948,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
// scalars) and extend in-register to a legal 128-bit vector type. For sext
// loads these must work with a single scalar load.
if (!ExperimentalVectorWideningLegalization) {
for (MVT VT : MVT::integer_vector_valuetypes()) {
for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
@ -1112,7 +1112,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (!ExperimentalVectorWideningLegalization) {
// Avoid narrow result types when widening. The legal types are listed
// in the next loop.
for (MVT VT : MVT::integer_vector_valuetypes()) {
for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);

View File

@ -769,7 +769,10 @@ void TypeInfer::expandOverloads(TypeSetByHwMode::SetType &Out,
for (MVT T : MVT::integer_valuetypes())
if (Legal.count(T))
Out.insert(T);
for (MVT T : MVT::integer_vector_valuetypes())
for (MVT T : MVT::integer_fixedlen_vector_valuetypes())
if (Legal.count(T))
Out.insert(T);
for (MVT T : MVT::integer_scalable_vector_valuetypes())
if (Legal.count(T))
Out.insert(T);
return;
@ -777,7 +780,10 @@ void TypeInfer::expandOverloads(TypeSetByHwMode::SetType &Out,
for (MVT T : MVT::fp_valuetypes())
if (Legal.count(T))
Out.insert(T);
for (MVT T : MVT::fp_vector_valuetypes())
for (MVT T : MVT::fp_fixedlen_vector_valuetypes())
if (Legal.count(T))
Out.insert(T);
for (MVT T : MVT::fp_scalable_vector_valuetypes())
if (Legal.count(T))
Out.insert(T);
return;