# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
#
# Check that we can recognize a shuffle mask for a uzp instruction and produce
# a G_UZP1 or G_UZP2 where appropriate.
#
# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
...
---
name:            uzp1_v4s32
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $q0, $q1

    ; Even-indexed elements (0, 2, 4, 6) across both inputs -> G_UZP1.
    ; CHECK-LABEL: name: uzp1_v4s32
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK: [[UZP1_:%[0-9]+]]:_(<4 x s32>) = G_UZP1 [[COPY]], [[COPY1]]
    ; CHECK: $q0 = COPY [[UZP1_]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0(<4 x s32>), %1, shufflemask(0, 2, 4, 6)
    $q0 = COPY %2(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            uzp2_v4s32
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $q0, $q1

    ; Odd-indexed elements (1, 3, 5, 7) across both inputs -> G_UZP2.
    ; CHECK-LABEL: name: uzp2_v4s32
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK: [[UZP2_:%[0-9]+]]:_(<4 x s32>) = G_UZP2 [[COPY]], [[COPY1]]
    ; CHECK: $q0 = COPY [[UZP2_]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0(<4 x s32>), %1, shufflemask(1, 3, 5, 7)
    $q0 = COPY %2(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            no_uzp1
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $q0, $q1

    ; See isUZPMask: Mask[1] != 2 * i + 0
    ; CHECK-LABEL: name: no_uzp1
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<4 x s32>), [[COPY1]], shufflemask(0, 1, 4, 6)
    ; CHECK: $q0 = COPY [[SHUF]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0(<4 x s32>), %1, shufflemask(0, 1, 4, 6)
    $q0 = COPY %2(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            no_uzp2
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $q0, $q1

    ; See isUZPMask: Mask[1] != 2 * i + 1
    ; CHECK-LABEL: name: no_uzp2
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[COPY]](<4 x s32>), [[COPY1]], shufflemask(1, 4, 5, 7)
    ; CHECK: $q0 = COPY [[SHUF]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0(<4 x s32>), %1, shufflemask(1, 4, 5, 7)
    $q0 = COPY %2(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            uzp1_undef
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $q0, $q1

    ; Make sure that we can still produce a uzp1/uzp2 with undef indices.
    ; CHECK-LABEL: name: uzp1_undef
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK: [[UZP1_:%[0-9]+]]:_(<4 x s32>) = G_UZP1 [[COPY]], [[COPY1]]
    ; CHECK: $q0 = COPY [[UZP1_]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0(<4 x s32>), %1, shufflemask(0, -1, 4, 6)
    $q0 = COPY %2(<4 x s32>)
    RET_ReallyLR implicit $q0

...
---
name:            uzp2_undef
legalized:       true
tracksRegLiveness: true
body:             |
  bb.1.entry:
    liveins: $q0, $q1

    ; Make sure that we can still produce a uzp1/uzp2 with undef indices.
    ; CHECK-LABEL: name: uzp2_undef
    ; CHECK: liveins: $q0, $q1
    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
    ; CHECK: [[UZP2_:%[0-9]+]]:_(<4 x s32>) = G_UZP2 [[COPY]], [[COPY1]]
    ; CHECK: $q0 = COPY [[UZP2_]](<4 x s32>)
    ; CHECK: RET_ReallyLR implicit $q0
    %0:_(<4 x s32>) = COPY $q0
    %1:_(<4 x s32>) = COPY $q1
    %2:_(<4 x s32>) = G_SHUFFLE_VECTOR %0(<4 x s32>), %1, shufflemask(1, 3, -1, 7)
    $q0 = COPY %2(<4 x s32>)
    RET_ReallyLR implicit $q0

...