diff --git a/test/CodeGen/AArch64/arm64-inline-asm.ll b/test/CodeGen/AArch64/arm64-inline-asm.ll index dfb2bc8a5e9..802d95826ce 100644 --- a/test/CodeGen/AArch64/arm64-inline-asm.ll +++ b/test/CodeGen/AArch64/arm64-inline-asm.ll @@ -170,7 +170,7 @@ entry: %data = alloca <4 x float>, align 16 %0 = load <4 x float>, <4 x float>* %data, align 16 call void asm sideeffect "mov.2d v4, $0\0A", "x,~{v4}"(<4 x float> %0) nounwind - ; CHECK mov.2d v4, {{v([0-9])|(1[0-5])}} + ; CHECK: mov.2d v4, {{v([0-9]|1[0-5])}} ret void } diff --git a/test/CodeGen/AArch64/arm64-ld1.ll b/test/CodeGen/AArch64/arm64-ld1.ll index 209065e9d24..a83a2703add 100644 --- a/test/CodeGen/AArch64/arm64-ld1.ll +++ b/test/CodeGen/AArch64/arm64-ld1.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs | FileCheck %s +; RUN: llc < %s -march=arm64 -aarch64-neon-syntax=apple -verify-machineinstrs -asm-verbose=false | FileCheck %s %struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> } %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> } @@ -8,8 +8,8 @@ define %struct.__neon_int8x8x2_t @ld2_8b(i8* %A) nounwind { ; CHECK-LABEL: ld2_8b ; Make sure we are loading into the results defined by the ABI (i.e., v0, v1) ; and from the argument of the function also defined by ABI (i.e., x0) -; CHECK ld2.8b { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2.8b { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A) ret %struct.__neon_int8x8x2_t %tmp2 } @@ -17,8 +17,8 @@ define %struct.__neon_int8x8x2_t @ld2_8b(i8* %A) nounwind { define %struct.__neon_int8x8x3_t @ld3_8b(i8* %A) nounwind { ; CHECK-LABEL: ld3_8b ; Make sure we are using the operands defined by the ABI -; CHECK ld3.8b { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3.8b { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A) ret %struct.__neon_int8x8x3_t %tmp2 } @@ -26,8 +26,8 
@@ define %struct.__neon_int8x8x3_t @ld3_8b(i8* %A) nounwind { define %struct.__neon_int8x8x4_t @ld4_8b(i8* %A) nounwind { ; CHECK-LABEL: ld4_8b ; Make sure we are using the operands defined by the ABI -; CHECK ld4.8b { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4.8b { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A) ret %struct.__neon_int8x8x4_t %tmp2 } @@ -43,8 +43,8 @@ declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4.v8i8.p0i8(i8*) nounwind define %struct.__neon_int8x16x2_t @ld2_16b(i8* %A) nounwind { ; CHECK-LABEL: ld2_16b ; Make sure we are using the operands defined by the ABI -; CHECK ld2.16b { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2.16b { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A) ret %struct.__neon_int8x16x2_t %tmp2 } @@ -52,8 +52,8 @@ define %struct.__neon_int8x16x2_t @ld2_16b(i8* %A) nounwind { define %struct.__neon_int8x16x3_t @ld3_16b(i8* %A) nounwind { ; CHECK-LABEL: ld3_16b ; Make sure we are using the operands defined by the ABI -; CHECK ld3.16b { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3.16b { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A) ret %struct.__neon_int8x16x3_t %tmp2 } @@ -61,8 +61,8 @@ define %struct.__neon_int8x16x3_t @ld3_16b(i8* %A) nounwind { define %struct.__neon_int8x16x4_t @ld4_16b(i8* %A) nounwind { ; CHECK-LABEL: ld4_16b ; Make sure we are using the operands defined by the ABI -; CHECK ld4.16b { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4.16b { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A) ret %struct.__neon_int8x16x4_t %tmp2 } @@ -78,8 +78,8 @@ declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*) nounwi define %struct.__neon_int16x4x2_t @ld2_4h(i16* %A) nounwind { ; 
CHECK-LABEL: ld2_4h ; Make sure we are using the operands defined by the ABI -; CHECK ld2.4h { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2.4h { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A) ret %struct.__neon_int16x4x2_t %tmp2 } @@ -87,8 +87,8 @@ define %struct.__neon_int16x4x2_t @ld2_4h(i16* %A) nounwind { define %struct.__neon_int16x4x3_t @ld3_4h(i16* %A) nounwind { ; CHECK-LABEL: ld3_4h ; Make sure we are using the operands defined by the ABI -; CHECK ld3.4h { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3.4h { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A) ret %struct.__neon_int16x4x3_t %tmp2 } @@ -96,8 +96,8 @@ define %struct.__neon_int16x4x3_t @ld3_4h(i16* %A) nounwind { define %struct.__neon_int16x4x4_t @ld4_4h(i16* %A) nounwind { ; CHECK-LABEL: ld4_4h ; Make sure we are using the operands defined by the ABI -; CHECK ld4.4h { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4.4h { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A) ret %struct.__neon_int16x4x4_t %tmp2 } @@ -113,8 +113,8 @@ declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*) noun define %struct.__neon_int16x8x2_t @ld2_8h(i16* %A) nounwind { ; CHECK-LABEL: ld2_8h ; Make sure we are using the operands defined by the ABI -; CHECK ld2.8h { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2.8h { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A) ret %struct.__neon_int16x8x2_t %tmp2 } @@ -122,8 +122,8 @@ define %struct.__neon_int16x8x2_t @ld2_8h(i16* %A) nounwind { define %struct.__neon_int16x8x3_t @ld3_8h(i16* %A) nounwind { ; CHECK-LABEL: ld3_8h ; Make sure we are using the operands defined by the ABI -; CHECK ld3.8h { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3.8h { v0, v1, 
v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A) ret %struct.__neon_int16x8x3_t %tmp2 } @@ -131,8 +131,8 @@ define %struct.__neon_int16x8x3_t @ld3_8h(i16* %A) nounwind { define %struct.__neon_int16x8x4_t @ld4_8h(i16* %A) nounwind { ; CHECK-LABEL: ld4_8h ; Make sure we are using the operands defined by the ABI -; CHECK ld4.8h { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4.8h { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A) ret %struct.__neon_int16x8x4_t %tmp2 } @@ -148,8 +148,8 @@ declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4.v8i16.p0i16(i16*) noun define %struct.__neon_int32x2x2_t @ld2_2s(i32* %A) nounwind { ; CHECK-LABEL: ld2_2s ; Make sure we are using the operands defined by the ABI -; CHECK ld2.2s { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2.2s { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A) ret %struct.__neon_int32x2x2_t %tmp2 } @@ -157,8 +157,8 @@ define %struct.__neon_int32x2x2_t @ld2_2s(i32* %A) nounwind { define %struct.__neon_int32x2x3_t @ld3_2s(i32* %A) nounwind { ; CHECK-LABEL: ld3_2s ; Make sure we are using the operands defined by the ABI -; CHECK ld3.2s { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3.2s { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A) ret %struct.__neon_int32x2x3_t %tmp2 } @@ -166,8 +166,8 @@ define %struct.__neon_int32x2x3_t @ld3_2s(i32* %A) nounwind { define %struct.__neon_int32x2x4_t @ld4_2s(i32* %A) nounwind { ; CHECK-LABEL: ld4_2s ; Make sure we are using the operands defined by the ABI -; CHECK ld4.2s { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4.2s { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A) ret 
%struct.__neon_int32x2x4_t %tmp2 } @@ -183,8 +183,8 @@ declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4.v2i32.p0i32(i32*) noun define %struct.__neon_int32x4x2_t @ld2_4s(i32* %A) nounwind { ; CHECK-LABEL: ld2_4s ; Make sure we are using the operands defined by the ABI -; CHECK ld2.4s { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2.4s { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A) ret %struct.__neon_int32x4x2_t %tmp2 } @@ -192,8 +192,8 @@ define %struct.__neon_int32x4x2_t @ld2_4s(i32* %A) nounwind { define %struct.__neon_int32x4x3_t @ld3_4s(i32* %A) nounwind { ; CHECK-LABEL: ld3_4s ; Make sure we are using the operands defined by the ABI -; CHECK ld3.4s { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3.4s { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A) ret %struct.__neon_int32x4x3_t %tmp2 } @@ -201,8 +201,8 @@ define %struct.__neon_int32x4x3_t @ld3_4s(i32* %A) nounwind { define %struct.__neon_int32x4x4_t @ld4_4s(i32* %A) nounwind { ; CHECK-LABEL: ld4_4s ; Make sure we are using the operands defined by the ABI -; CHECK ld4.4s { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4.4s { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A) ret %struct.__neon_int32x4x4_t %tmp2 } @@ -218,8 +218,8 @@ declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4.v4i32.p0i32(i32*) noun define %struct.__neon_int64x2x2_t @ld2_2d(i64* %A) nounwind { ; CHECK-LABEL: ld2_2d ; Make sure we are using the operands defined by the ABI -; CHECK ld2.2d { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2.2d { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A) ret %struct.__neon_int64x2x2_t %tmp2 } @@ -227,8 +227,8 @@ define %struct.__neon_int64x2x2_t @ld2_2d(i64* %A) nounwind { define 
%struct.__neon_int64x2x3_t @ld3_2d(i64* %A) nounwind { ; CHECK-LABEL: ld3_2d ; Make sure we are using the operands defined by the ABI -; CHECK ld3.2d { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3.2d { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A) ret %struct.__neon_int64x2x3_t %tmp2 } @@ -236,8 +236,8 @@ define %struct.__neon_int64x2x3_t @ld3_2d(i64* %A) nounwind { define %struct.__neon_int64x2x4_t @ld4_2d(i64* %A) nounwind { ; CHECK-LABEL: ld4_2d ; Make sure we are using the operands defined by the ABI -; CHECK ld4.2d { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4.2d { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A) ret %struct.__neon_int64x2x4_t %tmp2 } @@ -254,8 +254,8 @@ declare %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4.v2i64.p0i64(i64*) noun define %struct.__neon_int64x1x2_t @ld2_1di64(i64* %A) nounwind { ; CHECK-LABEL: ld2_1di64 ; Make sure we are using the operands defined by the ABI -; CHECK ld1.1d { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld1.1d { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A) ret %struct.__neon_int64x1x2_t %tmp2 } @@ -263,8 +263,8 @@ define %struct.__neon_int64x1x2_t @ld2_1di64(i64* %A) nounwind { define %struct.__neon_int64x1x3_t @ld3_1di64(i64* %A) nounwind { ; CHECK-LABEL: ld3_1di64 ; Make sure we are using the operands defined by the ABI -; CHECK ld1.1d { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld1.1d { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A) ret %struct.__neon_int64x1x3_t %tmp2 } @@ -272,8 +272,8 @@ define %struct.__neon_int64x1x3_t @ld3_1di64(i64* %A) nounwind { define %struct.__neon_int64x1x4_t @ld4_1di64(i64* %A) nounwind { ; CHECK-LABEL: ld4_1di64 ; Make sure we are using the operands defined 
by the ABI -; CHECK ld1.1d { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld1.1d { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A) ret %struct.__neon_int64x1x4_t %tmp2 } @@ -291,8 +291,8 @@ declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4.v1i64.p0i64(i64*) noun define %struct.__neon_float64x1x2_t @ld2_1df64(double* %A) nounwind { ; CHECK-LABEL: ld2_1df64 ; Make sure we are using the operands defined by the ABI -; CHECK ld1.1d { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld1.1d { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_float64x1x2_t @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A) ret %struct.__neon_float64x1x2_t %tmp2 } @@ -300,8 +300,8 @@ define %struct.__neon_float64x1x2_t @ld2_1df64(double* %A) nounwind { define %struct.__neon_float64x1x3_t @ld3_1df64(double* %A) nounwind { ; CHECK-LABEL: ld3_1df64 ; Make sure we are using the operands defined by the ABI -; CHECK ld1.1d { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld1.1d { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_float64x1x3_t @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A) ret %struct.__neon_float64x1x3_t %tmp2 } @@ -309,8 +309,8 @@ define %struct.__neon_float64x1x3_t @ld3_1df64(double* %A) nounwind { define %struct.__neon_float64x1x4_t @ld4_1df64(double* %A) nounwind { ; CHECK-LABEL: ld4_1df64 ; Make sure we are using the operands defined by the ABI -; CHECK ld1.1d { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld1.1d { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A) ret %struct.__neon_float64x1x4_t %tmp2 } @@ -323,8 +323,8 @@ declare %struct.__neon_float64x1x4_t @llvm.aarch64.neon.ld4.v1f64.p0f64(double*) define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, i8* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld2lane_16b -; 
CHECK ld2.b { v0, v1 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld2.b { v0, v1 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, i64 1, i8* %A) ret %struct.__neon_int8x16x2_t %tmp2 } @@ -332,8 +332,8 @@ define %struct.__neon_int8x16x2_t @ld2lane_16b(<16 x i8> %L1, <16 x i8> %L2, i8* define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i8* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld3lane_16b -; CHECK ld3.b { v0, v1, v2 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld3.b { v0, v1, v2 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, i64 1, i8* %A) ret %struct.__neon_int8x16x3_t %tmp2 } @@ -341,8 +341,8 @@ define %struct.__neon_int8x16x3_t @ld3lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 define %struct.__neon_int8x16x4_t @ld4lane_16b(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i8* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld4lane_16b -; CHECK ld4.b { v0, v1, v2, v3 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld4.b { v0, v1, v2, v3 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %L1, <16 x i8> %L2, <16 x i8> %L3, <16 x i8> %L4, i64 1, i8* %A) ret %struct.__neon_int8x16x4_t %tmp2 } @@ -354,8 +354,8 @@ declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, i16* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld2lane_8h -; CHECK ld2.h { v0, v1 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld2.h { v0, v1 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, i64 1, i16* %A) ret 
%struct.__neon_int16x8x2_t %tmp2 } @@ -363,8 +363,8 @@ define %struct.__neon_int16x8x2_t @ld2lane_8h(<8 x i16> %L1, <8 x i16> %L2, i16* define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i16* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld3lane_8h -; CHECK ld3.h { v0, v1, v3 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld3.h { v0, v1, v2 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, i64 1, i16* %A) ret %struct.__neon_int16x8x3_t %tmp2 } @@ -372,8 +372,8 @@ define %struct.__neon_int16x8x3_t @ld3lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x define %struct.__neon_int16x8x4_t @ld4lane_8h(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i16* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld4lane_8h -; CHECK ld4.h { v0, v1, v2, v3 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld4.h { v0, v1, v2, v3 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %L1, <8 x i16> %L2, <8 x i16> %L3, <8 x i16> %L4, i64 1, i16* %A) ret %struct.__neon_int16x8x4_t %tmp2 } @@ -385,8 +385,8 @@ declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, i32* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld2lane_4s -; CHECK ld2.s { v0, v1 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld2.s { v0, v1 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, i64 1, i32* %A) ret %struct.__neon_int32x4x2_t %tmp2 } @@ -394,8 +394,8 @@ define %struct.__neon_int32x4x2_t @ld2lane_4s(<4 x i32> %L1, <4 x i32> %L2, i32* define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i32* %A) nounwind { 
; Make sure we are using the operands defined by the ABI ; CHECK: ld3lane_4s -; CHECK ld3.s { v0, v1, v2 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld3.s { v0, v1, v2 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, i64 1, i32* %A) ret %struct.__neon_int32x4x3_t %tmp2 } @@ -403,8 +403,8 @@ define %struct.__neon_int32x4x3_t @ld3lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x define %struct.__neon_int32x4x4_t @ld4lane_4s(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i32* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld4lane_4s -; CHECK ld4.s { v0, v1, v2, v3 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld4.s { v0, v1, v2, v3 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %L1, <4 x i32> %L2, <4 x i32> %L3, <4 x i32> %L4, i64 1, i32* %A) ret %struct.__neon_int32x4x4_t %tmp2 } @@ -416,8 +416,8 @@ declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, i64* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld2lane_2d -; CHECK ld2.d { v0, v1 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld2.d { v0, v1 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, i64 1, i64* %A) ret %struct.__neon_int64x2x2_t %tmp2 } @@ -425,8 +425,8 @@ define %struct.__neon_int64x2x2_t @ld2lane_2d(<2 x i64> %L1, <2 x i64> %L2, i64* define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld3lane_2d -; CHECK ld3.d { v0, v1, v3 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld3.d { v0, v1, v2 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x3_t 
@llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, i64 1, i64* %A) ret %struct.__neon_int64x2x3_t %tmp2 } @@ -434,8 +434,8 @@ define %struct.__neon_int64x2x3_t @ld3lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x define %struct.__neon_int64x2x4_t @ld4lane_2d(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64* %A) nounwind { ; Make sure we are using the operands defined by the ABI ; CHECK: ld4lane_2d -; CHECK ld4.d { v0, v1, v2, v3 }[1], [x0] -; CHECK-NEXT ret +; CHECK: ld4.d { v0, v1, v2, v3 }[1], [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %L1, <2 x i64> %L2, <2 x i64> %L3, <2 x i64> %L4, i64 1, i64* %A) ret %struct.__neon_int64x2x4_t %tmp2 } @@ -448,7 +448,7 @@ define <8 x i8> @ld1r_8b(i8* %bar) { ; CHECK: ld1r_8b ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.8b { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i8, i8* %bar %tmp2 = insertelement <8 x i8> , i8 %tmp1, i32 0 %tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1 @@ -465,7 +465,7 @@ define <16 x i8> @ld1r_16b(i8* %bar) { ; CHECK: ld1r_16b ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.16b { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i8, i8* %bar %tmp2 = insertelement <16 x i8> , i8 %tmp1, i32 0 %tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1 @@ -490,7 +490,7 @@ define <4 x i16> @ld1r_4h(i16* %bar) { ; CHECK: ld1r_4h ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.4h { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i16, i16* %bar %tmp2 = insertelement <4 x i16> , i16 %tmp1, i32 0 %tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1 @@ -503,7 +503,7 @@ define <8 x i16> @ld1r_8h(i16* %bar) { ; CHECK: ld1r_8h ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.8h { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i16, i16* %bar %tmp2 = 
insertelement <8 x i16> , i16 %tmp1, i32 0 %tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1 @@ -520,7 +520,7 @@ define <2 x i32> @ld1r_2s(i32* %bar) { ; CHECK: ld1r_2s ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.2s { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i32, i32* %bar %tmp2 = insertelement <2 x i32> , i32 %tmp1, i32 0 %tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1 @@ -531,7 +531,7 @@ define <4 x i32> @ld1r_4s(i32* %bar) { ; CHECK: ld1r_4s ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.4s { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i32, i32* %bar %tmp2 = insertelement <4 x i32> , i32 %tmp1, i32 0 %tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1 @@ -544,7 +544,7 @@ define <2 x i64> @ld1r_2d(i64* %bar) { ; CHECK: ld1r_2d ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.2d { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i64, i64* %bar %tmp2 = insertelement <2 x i64> , i64 %tmp1, i32 0 %tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1 @@ -554,8 +554,8 @@ define <2 x i64> @ld1r_2d(i64* %bar) { define %struct.__neon_int8x8x2_t @ld2r_8b(i8* %A) nounwind { ; CHECK: ld2r_8b ; Make sure we are using the operands defined by the ABI -; CHECK ld2r.8b { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2r.8b { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x8x2_t @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A) ret %struct.__neon_int8x8x2_t %tmp2 } @@ -563,8 +563,8 @@ define %struct.__neon_int8x8x2_t @ld2r_8b(i8* %A) nounwind { define %struct.__neon_int8x8x3_t @ld3r_8b(i8* %A) nounwind { ; CHECK: ld3r_8b ; Make sure we are using the operands defined by the ABI -; CHECK ld3r.8b { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3r.8b { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x8x3_t @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A) ret %struct.__neon_int8x8x3_t %tmp2 } @@ -572,8 +572,8 @@ define 
%struct.__neon_int8x8x3_t @ld3r_8b(i8* %A) nounwind { define %struct.__neon_int8x8x4_t @ld4r_8b(i8* %A) nounwind { ; CHECK: ld4r_8b ; Make sure we are using the operands defined by the ABI -; CHECK ld4r.8b { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4r.8b { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A) ret %struct.__neon_int8x8x4_t %tmp2 } @@ -585,8 +585,8 @@ declare %struct.__neon_int8x8x4_t @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8*) nounwin define %struct.__neon_int8x16x2_t @ld2r_16b(i8* %A) nounwind { ; CHECK: ld2r_16b ; Make sure we are using the operands defined by the ABI -; CHECK ld2r.16b { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2r.16b { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x2_t @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A) ret %struct.__neon_int8x16x2_t %tmp2 } @@ -594,8 +594,8 @@ define %struct.__neon_int8x16x2_t @ld2r_16b(i8* %A) nounwind { define %struct.__neon_int8x16x3_t @ld3r_16b(i8* %A) nounwind { ; CHECK: ld3r_16b ; Make sure we are using the operands defined by the ABI -; CHECK ld3r.16b { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3r.16b { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x3_t @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A) ret %struct.__neon_int8x16x3_t %tmp2 } @@ -603,8 +603,8 @@ define %struct.__neon_int8x16x3_t @ld3r_16b(i8* %A) nounwind { define %struct.__neon_int8x16x4_t @ld4r_16b(i8* %A) nounwind { ; CHECK: ld4r_16b ; Make sure we are using the operands defined by the ABI -; CHECK ld4r.16b { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4r.16b { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A) ret %struct.__neon_int8x16x4_t %tmp2 } @@ -616,8 +616,8 @@ declare %struct.__neon_int8x16x4_t @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8*) nounw define %struct.__neon_int16x4x2_t @ld2r_4h(i16* %A) nounwind { ; CHECK: 
ld2r_4h ; Make sure we are using the operands defined by the ABI -; CHECK ld2r.4h { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2r.4h { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x4x2_t @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A) ret %struct.__neon_int16x4x2_t %tmp2 } @@ -625,8 +625,8 @@ define %struct.__neon_int16x4x2_t @ld2r_4h(i16* %A) nounwind { define %struct.__neon_int16x4x3_t @ld3r_4h(i16* %A) nounwind { ; CHECK: ld3r_4h ; Make sure we are using the operands defined by the ABI -; CHECK ld3r.4h { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3r.4h { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x4x3_t @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A) ret %struct.__neon_int16x4x3_t %tmp2 } @@ -634,8 +634,8 @@ define %struct.__neon_int16x4x3_t @ld3r_4h(i16* %A) nounwind { define %struct.__neon_int16x4x4_t @ld4r_4h(i16* %A) nounwind { ; CHECK: ld4r_4h ; Make sure we are using the operands defined by the ABI -; CHECK ld4r.4h { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4r.4h { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A) ret %struct.__neon_int16x4x4_t %tmp2 } @@ -647,8 +647,8 @@ declare %struct.__neon_int16x4x4_t @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16*) nou define %struct.__neon_int16x8x2_t @ld2r_8h(i16* %A) nounwind { ; CHECK: ld2r_8h ; Make sure we are using the operands defined by the ABI -; CHECK ld2r.8h { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2r.8h { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x2_t @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A) ret %struct.__neon_int16x8x2_t %tmp2 } @@ -656,8 +656,8 @@ define %struct.__neon_int16x8x2_t @ld2r_8h(i16* %A) nounwind { define %struct.__neon_int16x8x3_t @ld3r_8h(i16* %A) nounwind { ; CHECK: ld3r_8h ; Make sure we are using the operands defined by the ABI -; CHECK ld3r.8h { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3r.8h { v0, v1, v2 }, 
[x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x3_t @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A) ret %struct.__neon_int16x8x3_t %tmp2 } @@ -665,8 +665,8 @@ define %struct.__neon_int16x8x3_t @ld3r_8h(i16* %A) nounwind { define %struct.__neon_int16x8x4_t @ld4r_8h(i16* %A) nounwind { ; CHECK: ld4r_8h ; Make sure we are using the operands defined by the ABI -; CHECK ld4r.8h { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4r.8h { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A) ret %struct.__neon_int16x8x4_t %tmp2 } @@ -678,8 +678,8 @@ declare %struct.__neon_int16x8x4_t @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16*) nou define %struct.__neon_int32x2x2_t @ld2r_2s(i32* %A) nounwind { ; CHECK: ld2r_2s ; Make sure we are using the operands defined by the ABI -; CHECK ld2r.2s { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2r.2s { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x2x2_t @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A) ret %struct.__neon_int32x2x2_t %tmp2 } @@ -687,8 +687,8 @@ define %struct.__neon_int32x2x2_t @ld2r_2s(i32* %A) nounwind { define %struct.__neon_int32x2x3_t @ld3r_2s(i32* %A) nounwind { ; CHECK: ld3r_2s ; Make sure we are using the operands defined by the ABI -; CHECK ld3r.2s { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3r.2s { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x2x3_t @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A) ret %struct.__neon_int32x2x3_t %tmp2 } @@ -696,8 +696,8 @@ define %struct.__neon_int32x2x3_t @ld3r_2s(i32* %A) nounwind { define %struct.__neon_int32x2x4_t @ld4r_2s(i32* %A) nounwind { ; CHECK: ld4r_2s ; Make sure we are using the operands defined by the ABI -; CHECK ld4r.2s { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A) ret %struct.__neon_int32x2x4_t 
%tmp2 } @@ -709,8 +709,8 @@ declare %struct.__neon_int32x2x4_t @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32*) nou define %struct.__neon_int32x4x2_t @ld2r_4s(i32* %A) nounwind { ; CHECK: ld2r_4s ; Make sure we are using the operands defined by the ABI -; CHECK ld2r.4s { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2r.4s { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x2_t @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A) ret %struct.__neon_int32x4x2_t %tmp2 } @@ -718,8 +718,8 @@ define %struct.__neon_int32x4x2_t @ld2r_4s(i32* %A) nounwind { define %struct.__neon_int32x4x3_t @ld3r_4s(i32* %A) nounwind { ; CHECK: ld3r_4s ; Make sure we are using the operands defined by the ABI -; CHECK ld3r.4s { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3r.4s { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x3_t @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A) ret %struct.__neon_int32x4x3_t %tmp2 } @@ -727,8 +727,8 @@ define %struct.__neon_int32x4x3_t @ld3r_4s(i32* %A) nounwind { define %struct.__neon_int32x4x4_t @ld4r_4s(i32* %A) nounwind { ; CHECK: ld4r_4s ; Make sure we are using the operands defined by the ABI -; CHECK ld4r.4s { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A) ret %struct.__neon_int32x4x4_t %tmp2 } @@ -740,8 +740,8 @@ declare %struct.__neon_int32x4x4_t @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32*) nou define %struct.__neon_int64x1x2_t @ld2r_1d(i64* %A) nounwind { ; CHECK: ld2r_1d ; Make sure we are using the operands defined by the ABI -; CHECK ld2r.1d { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2r.1d { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x1x2_t @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A) ret %struct.__neon_int64x1x2_t %tmp2 } @@ -749,8 +749,8 @@ define %struct.__neon_int64x1x2_t @ld2r_1d(i64* %A) nounwind { define %struct.__neon_int64x1x3_t @ld3r_1d(i64* 
%A) nounwind { ; CHECK: ld3r_1d ; Make sure we are using the operands defined by the ABI -; CHECK ld3r.1d { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3r.1d { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x1x3_t @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A) ret %struct.__neon_int64x1x3_t %tmp2 } @@ -758,8 +758,8 @@ define %struct.__neon_int64x1x3_t @ld3r_1d(i64* %A) nounwind { define %struct.__neon_int64x1x4_t @ld4r_1d(i64* %A) nounwind { ; CHECK: ld4r_1d ; Make sure we are using the operands defined by the ABI -; CHECK ld4r.1d { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret +; CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A) ret %struct.__neon_int64x1x4_t %tmp2 } @@ -771,8 +771,8 @@ declare %struct.__neon_int64x1x4_t @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64*) nou define %struct.__neon_int64x2x2_t @ld2r_2d(i64* %A) nounwind { ; CHECK: ld2r_2d ; Make sure we are using the operands defined by the ABI -; CHECK ld2r.2d { v0, v1 }, [x0] -; CHECK-NEXT ret +; CHECK: ld2r.2d { v0, v1 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x2_t @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A) ret %struct.__neon_int64x2x2_t %tmp2 } @@ -780,8 +780,8 @@ define %struct.__neon_int64x2x2_t @ld2r_2d(i64* %A) nounwind { define %struct.__neon_int64x2x3_t @ld3r_2d(i64* %A) nounwind { ; CHECK: ld3r_2d ; Make sure we are using the operands defined by the ABI -; CHECK ld3r.2d { v0, v1, v2 }, [x0] -; CHECK-NEXT ret +; CHECK: ld3r.2d { v0, v1, v2 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x3_t @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A) ret %struct.__neon_int64x2x3_t %tmp2 } @@ -789,8 +789,8 @@ define %struct.__neon_int64x2x3_t @ld3r_2d(i64* %A) nounwind { define %struct.__neon_int64x2x4_t @ld4r_2d(i64* %A) nounwind { ; CHECK: ld4r_2d ; Make sure we are using the operands defined by the ABI -; CHECK ld4r.2d { v0, v1, v2, v3 }, [x0] -; CHECK-NEXT ret 
+; CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0] +; CHECK-NEXT: ret %tmp2 = call %struct.__neon_int64x2x4_t @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A) ret %struct.__neon_int64x2x4_t %tmp2 } @@ -803,7 +803,7 @@ define <16 x i8> @ld1_16b(<16 x i8> %V, i8* %bar) { ; CHECK-LABEL: ld1_16b ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.b { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i8, i8* %bar %tmp2 = insertelement <16 x i8> %V, i8 %tmp1, i32 0 ret <16 x i8> %tmp2 @@ -813,7 +813,7 @@ define <8 x i16> @ld1_8h(<8 x i16> %V, i16* %bar) { ; CHECK-LABEL: ld1_8h ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.h { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i16, i16* %bar %tmp2 = insertelement <8 x i16> %V, i16 %tmp1, i32 0 ret <8 x i16> %tmp2 @@ -823,7 +823,7 @@ define <4 x i32> @ld1_4s(<4 x i32> %V, i32* %bar) { ; CHECK-LABEL: ld1_4s ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.s { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i32, i32* %bar %tmp2 = insertelement <4 x i32> %V, i32 %tmp1, i32 0 ret <4 x i32> %tmp2 @@ -833,7 +833,7 @@ define <4 x float> @ld1_4s_float(<4 x float> %V, float* %bar) { ; CHECK-LABEL: ld1_4s_float: ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.s { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load float, float* %bar %tmp2 = insertelement <4 x float> %V, float %tmp1, i32 0 ret <4 x float> %tmp2 @@ -843,7 +843,7 @@ define <2 x i64> @ld1_2d(<2 x i64> %V, i64* %bar) { ; CHECK-LABEL: ld1_2d ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.d { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i64, i64* %bar %tmp2 = insertelement <2 x i64> %V, i64 %tmp1, i32 0 ret <2 x i64> %tmp2 @@ -853,7 +853,7 @@ define <2 x double> @ld1_2d_double(<2 x double> %V, double* %bar) { ; CHECK-LABEL: ld1_2d_double: ; Make sure we are using the operands defined by the ABI ; CHECK: 
ld1.d { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load double, double* %bar %tmp2 = insertelement <2 x double> %V, double %tmp1, i32 0 ret <2 x double> %tmp2 @@ -872,7 +872,7 @@ define <8 x i8> @ld1_8b(<8 x i8> %V, i8* %bar) { ; CHECK-LABEL: ld1_8b ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.b { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i8, i8* %bar %tmp2 = insertelement <8 x i8> %V, i8 %tmp1, i32 0 ret <8 x i8> %tmp2 @@ -882,7 +882,7 @@ define <4 x i16> @ld1_4h(<4 x i16> %V, i16* %bar) { ; CHECK-LABEL: ld1_4h ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.h { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i16, i16* %bar %tmp2 = insertelement <4 x i16> %V, i16 %tmp1, i32 0 ret <4 x i16> %tmp2 @@ -892,7 +892,7 @@ define <2 x i32> @ld1_2s(<2 x i32> %V, i32* %bar) { ; CHECK-LABEL: ld1_2s: ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.s { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load i32, i32* %bar %tmp2 = insertelement <2 x i32> %V, i32 %tmp1, i32 0 ret <2 x i32> %tmp2 @@ -902,7 +902,7 @@ define <2 x float> @ld1_2s_float(<2 x float> %V, float* %bar) { ; CHECK-LABEL: ld1_2s_float: ; Make sure we are using the operands defined by the ABI ; CHECK: ld1.s { v0 }[0], [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp1 = load float, float* %bar %tmp2 = insertelement <2 x float> %V, float %tmp1, i32 0 ret <2 x float> %tmp2 @@ -945,7 +945,7 @@ entry: ; CHECK-LABEL: ld1r_4s_float ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.4s { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp = load float, float* %x, align 4 %tmp1 = insertelement <4 x float> undef, float %tmp, i32 0 %tmp2 = insertelement <4 x float> %tmp1, float %tmp, i32 1 @@ -959,7 +959,7 @@ entry: ; CHECK-LABEL: ld1r_2s_float ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.2s { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp = 
load float, float* %x, align 4 %tmp1 = insertelement <2 x float> undef, float %tmp, i32 0 %tmp2 = insertelement <2 x float> %tmp1, float %tmp, i32 1 @@ -971,7 +971,7 @@ entry: ; CHECK-LABEL: ld1r_2d_double ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.2d { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp = load double, double* %x, align 4 %tmp1 = insertelement <2 x double> undef, double %tmp, i32 0 %tmp2 = insertelement <2 x double> %tmp1, double %tmp, i32 1 @@ -983,7 +983,7 @@ entry: ; CHECK-LABEL: ld1r_1d_double ; Make sure we are using the operands defined by the ABI ; CHECK: ldr d0, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp = load double, double* %x, align 4 %tmp1 = insertelement <1 x double> undef, double %tmp, i32 0 ret <1 x double> %tmp1 @@ -994,7 +994,7 @@ entry: ; CHECK-LABEL: ld1r_4s_float_shuff ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.4s { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp = load float, float* %x, align 4 %tmp1 = insertelement <4 x float> undef, float %tmp, i32 0 %lane = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer @@ -1006,7 +1006,7 @@ entry: ; CHECK-LABEL: ld1r_2s_float_shuff ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.2s { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp = load float, float* %x, align 4 %tmp1 = insertelement <2 x float> undef, float %tmp, i32 0 %lane = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer @@ -1018,7 +1018,7 @@ entry: ; CHECK-LABEL: ld1r_2d_double_shuff ; Make sure we are using the operands defined by the ABI ; CHECK: ld1r.2d { v0 }, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp = load double, double* %x, align 4 %tmp1 = insertelement <2 x double> undef, double %tmp, i32 0 %lane = shufflevector <2 x double> %tmp1, <2 x double> undef, <2 x i32> zeroinitializer @@ -1030,7 +1030,7 @@ entry: ; CHECK-LABEL: ld1r_1d_double_shuff ; Make sure we are using 
the operands defined by the ABI ; CHECK: ldr d0, [x0] -; CHECK-NEXT ret +; CHECK-NEXT: ret %tmp = load double, double* %x, align 4 %tmp1 = insertelement <1 x double> undef, double %tmp, i32 0 %lane = shufflevector <1 x double> %tmp1, <1 x double> undef, <1 x i32> zeroinitializer diff --git a/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll index ee8f1522f23..fac5f8ad2e9 100644 --- a/test/CodeGen/AArch64/arm64-misched-basic-A57.ll +++ b/test/CodeGen/AArch64/arm64-misched-basic-A57.ll @@ -9,8 +9,8 @@ ; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s ; CHECK: ********** MI Scheduling ********** ; CHECK: main:BB#2 -; CHECK LDR -; CHECK Latency : 4 +; CHECK: LDR +; CHECK: Latency : 4 ; CHECK: *** Final schedule for BB#2 *** ; CHECK: LDR ; CHECK: LDR diff --git a/test/CodeGen/AArch64/arm64-st1.ll b/test/CodeGen/AArch64/arm64-st1.ll index 4b133c4b4e4..0387a91ea0e 100644 --- a/test/CodeGen/AArch64/arm64-st1.ll +++ b/test/CodeGen/AArch64/arm64-st1.ll @@ -374,21 +374,21 @@ declare void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64>, <2 x i64>, <2 x i define void @st2_8b(<8 x i8> %A, <8 x i8> %B, i8* %P) nounwind { ; CHECK-LABEL: st2_8b -; CHECK st2.8b +; CHECK: st2.8b call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, i8* %P) ret void } define void @st3_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %P) nounwind { ; CHECK-LABEL: st3_8b -; CHECK st3.8b +; CHECK: st3.8b call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, i8* %P) ret void } define void @st4_8b(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %P) nounwind { ; CHECK-LABEL: st4_8b -; CHECK st4.8b +; CHECK: st4.8b call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %A, <8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %P) ret void } @@ -399,21 +399,21 @@ declare void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8>, <8 x i8>, <8 x 
i8>, <8 x define void @st2_16b(<16 x i8> %A, <16 x i8> %B, i8* %P) nounwind { ; CHECK-LABEL: st2_16b -; CHECK st2.16b +; CHECK: st2.16b call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, i8* %P) ret void } define void @st3_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %P) nounwind { ; CHECK-LABEL: st3_16b -; CHECK st3.16b +; CHECK: st3.16b call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, i8* %P) ret void } define void @st4_16b(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %P) nounwind { ; CHECK-LABEL: st4_16b -; CHECK st4.16b +; CHECK: st4.16b call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %P) ret void } @@ -424,21 +424,21 @@ declare void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8>, <16 x i8>, <16 x i8>, define void @st2_4h(<4 x i16> %A, <4 x i16> %B, i16* %P) nounwind { ; CHECK-LABEL: st2_4h -; CHECK st2.4h +; CHECK: st2.4h call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, i16* %P) ret void } define void @st3_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %P) nounwind { ; CHECK-LABEL: st3_4h -; CHECK st3.4h +; CHECK: st3.4h call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, i16* %P) ret void } define void @st4_4h(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %P) nounwind { ; CHECK-LABEL: st4_4h -; CHECK st4.4h +; CHECK: st4.4h call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %A, <4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %P) ret void } @@ -449,21 +449,21 @@ declare void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16>, <4 x i16>, <4 x i16>, define void @st2_8h(<8 x i16> %A, <8 x i16> %B, i16* %P) nounwind { ; CHECK-LABEL: st2_8h -; CHECK st2.8h +; CHECK: st2.8h call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, i16* %P) ret void } define void @st3_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %P) nounwind { ; CHECK-LABEL: 
st3_8h -; CHECK st3.8h +; CHECK: st3.8h call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, i16* %P) ret void } define void @st4_8h(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %P) nounwind { ; CHECK-LABEL: st4_8h -; CHECK st4.8h +; CHECK: st4.8h call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %A, <8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %P) ret void } @@ -474,21 +474,21 @@ declare void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16>, <8 x i16>, <8 x i16>, define void @st2_2s(<2 x i32> %A, <2 x i32> %B, i32* %P) nounwind { ; CHECK-LABEL: st2_2s -; CHECK st2.2s +; CHECK: st2.2s call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, i32* %P) ret void } define void @st3_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %P) nounwind { ; CHECK-LABEL: st3_2s -; CHECK st3.2s +; CHECK: st3.2s call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, i32* %P) ret void } define void @st4_2s(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %P) nounwind { ; CHECK-LABEL: st4_2s -; CHECK st4.2s +; CHECK: st4.2s call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %A, <2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %P) ret void } @@ -499,21 +499,21 @@ declare void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32>, <2 x i32>, <2 x i32>, define void @st2_4s(<4 x i32> %A, <4 x i32> %B, i32* %P) nounwind { ; CHECK-LABEL: st2_4s -; CHECK st2.4s +; CHECK: st2.4s call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, i32* %P) ret void } define void @st3_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %P) nounwind { ; CHECK-LABEL: st3_4s -; CHECK st3.4s +; CHECK: st3.4s call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, i32* %P) ret void } define void @st4_4s(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %P) nounwind { ; CHECK-LABEL: st4_4s -; CHECK st4.4s +; CHECK: st4.4s call void 
@llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %A, <4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %P) ret void } @@ -522,23 +522,24 @@ declare void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32>, <4 x i32>, i32*) noun declare void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly declare void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, i32*) nounwind readonly +; If there's only one element, st2/3/4 don't make much sense, stick to st1. define void @st2_1d(<1 x i64> %A, <1 x i64> %B, i64* %P) nounwind { ; CHECK-LABEL: st2_1d -; CHECK st1.2d +; CHECK: st1.1d call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, i64* %P) ret void } define void @st3_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %P) nounwind { ; CHECK-LABEL: st3_1d -; CHECK st1.3d +; CHECK: st1.1d call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, i64* %P) ret void } define void @st4_1d(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %P) nounwind { ; CHECK-LABEL: st4_1d -; CHECK st1.4d +; CHECK: st1.1d call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %A, <1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %P) ret void } @@ -549,21 +550,21 @@ declare void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, define void @st2_2d(<2 x i64> %A, <2 x i64> %B, i64* %P) nounwind { ; CHECK-LABEL: st2_2d -; CHECK st2.2d +; CHECK: st2.2d call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, i64* %P) ret void } define void @st3_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %P) nounwind { ; CHECK-LABEL: st3_2d -; CHECK st2.3d +; CHECK: st3.2d call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, i64* %P) ret void } define void @st4_2d(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %P) nounwind { ; CHECK-LABEL: st4_2d -; CHECK st2.4d +; CHECK: st4.2d call void 
@llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %A, <2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %P) ret void } diff --git a/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll b/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll index 013e16e2a73..6b41141163a 100644 --- a/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll +++ b/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll @@ -12,7 +12,7 @@ define <2 x i64> @test_add(<2 x i64> %x, <2 x i64> %y) nounwind { define <2 x i64> @increment_by_one(<2 x i64> %x) nounwind { %result = add <2 x i64> %x, ret <2 x i64> %result -; CHECK vaddudm 2, 2, 3 +; CHECK: vaddudm 2, 2, 3 } define <2 x i64> @increment_by_val(<2 x i64> %x, i64 %val) nounwind { @@ -47,7 +47,7 @@ define <2 x i64> @test_sub(<2 x i64> %x, <2 x i64> %y) nounwind { define <2 x i64> @decrement_by_one(<2 x i64> %x) nounwind { %result = sub <2 x i64> %x, ret <2 x i64> %result -; CHECK vsubudm 2, 2, 3 +; CHECK: vsubudm 2, 2, 3 } define <2 x i64> @decrement_by_val(<2 x i64> %x, i64 %val) nounwind { @@ -55,7 +55,7 @@ define <2 x i64> @decrement_by_val(<2 x i64> %x, i64 %val) nounwind { %tmpvec2 = insertelement <2 x i64> %tmpvec, i64 %val, i32 1 %result = sub <2 x i64> %x, %tmpvec2 ret <2 x i64> %result -; CHECK vsubudm 2, 2, 3 +; CHECK: vsubudm 2, 2, 3 } diff --git a/test/CodeGen/X86/avx-basic.ll b/test/CodeGen/X86/avx-basic.ll index 683f43b19d0..6857bb8bd11 100644 --- a/test/CodeGen/X86/avx-basic.ll +++ b/test/CodeGen/X86/avx-basic.ll @@ -94,7 +94,7 @@ entry: ; PR22685 ; CHECK: mov00 -; CHECK vmovss +; CHECK: vmovss define <8 x float> @mov00_8f32(float* %ptr) { %val = load float, float* %ptr %vec = insertelement <8 x float> zeroinitializer, float %val, i32 0 diff --git a/test/Instrumentation/BoundsChecking/simple.ll b/test/Instrumentation/BoundsChecking/simple.ll index 7afc4e75bfe..5fae2f48c28 100644 --- a/test/Instrumentation/BoundsChecking/simple.ll +++ b/test/Instrumentation/BoundsChecking/simple.ll @@ -67,7 +67,7 @@ define void @f5_as1(i64 %x) nounwind { ; CHECK: @f5_as1 %idx 
= getelementptr inbounds [8 x i8], [8 x i8] addrspace(1)* @.str_as1, i64 0, i64 %x ; CHECK: sub i16 - ; CHECK icmp ult i16 + ; CHECK: icmp ult i16 ; CHECK: trap %1 = load i8, i8 addrspace(1)* %idx, align 4 ret void diff --git a/test/MC/Disassembler/ARM/neont2.txt b/test/MC/Disassembler/ARM/neont2.txt index e493fbaefc5..536095f6a37 100644 --- a/test/MC/Disassembler/ARM/neont2.txt +++ b/test/MC/Disassembler/ARM/neont2.txt @@ -1631,11 +1631,11 @@ # rdar://10798451 0xe7 0xf9 0x32 0x1d -# CHECK vld2.8 {d17[], d19[]}, [r7:16], r2 +# CHECK: vld2.8 {d17[], d19[]}, [r7:16], r2 0xe7 0xf9 0x3d 0x1d -# CHECK vld2.8 {d17[], d19[]}, [r7:16]! +# CHECK: vld2.8 {d17[], d19[]}, [r7:16]! 0xe7 0xf9 0x3f 0x1d -# CHECK vld2.8 {d17[], d19[]}, [r7:16] +# CHECK: vld2.8 {d17[], d19[]}, [r7:16] # rdar://11034702 0x04 0xf9 0x0d 0x87 diff --git a/test/Transforms/InstSimplify/compare.ll b/test/Transforms/InstSimplify/compare.ll index 376f6ea730b..07c90d8f1eb 100644 --- a/test/Transforms/InstSimplify/compare.ll +++ b/test/Transforms/InstSimplify/compare.ll @@ -918,7 +918,7 @@ define i1 @nonnull_deref_as_arg(i32 addrspace(1)* dereferenceable(4) %i) { ret i1 %cmp ; CHECK-LABEL: @nonnull_deref_as_arg ; CHECK: icmp -; CHECK ret +; CHECK: ret } declare nonnull i32* @returns_nonnull_helper()