diff --git a/test/CodeGen/ARM64/csel.ll b/test/CodeGen/ARM64/csel.ll
index 9b42858558b..98eba30f119 100644
--- a/test/CodeGen/ARM64/csel.ll
+++ b/test/CodeGen/ARM64/csel.ll
@@ -79,9 +79,9 @@ define i32 @foo7(i32 %a, i32 %b) nounwind {
 entry:
 ; CHECK-LABEL: foo7:
 ; CHECK: sub
-; CHECK-NEXT: adds
-; CHECK-NEXT: csneg
-; CHECK-NEXT: b
+; CHECK-next: adds
+; CHECK-next: csneg
+; CHECK-next: b
   %sub = sub nsw i32 %a, %b
   %cmp = icmp sgt i32 %sub, -1
   %sub3 = sub nsw i32 0, %sub
diff --git a/test/CodeGen/ARM64/vmul.ll b/test/CodeGen/ARM64/vmul.ll
index 9d08b9dc347..b6bd16ac0b4 100644
--- a/test/CodeGen/ARM64/vmul.ll
+++ b/test/CodeGen/ARM64/vmul.ll
@@ -1201,35 +1201,35 @@ define <2 x i64> @umlsl_lane_2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nou
 ; Scalar FMULX
 define float @fmulxs(float %a, float %b) nounwind {
 ; CHECK-LABEL: fmulxs:
-; CHECK-NEXT: fmulx s0, s0, s1
+; CHECKNEXT: fmulx s0, s0, s1
   %fmulx.i = tail call float @llvm.arm64.neon.fmulx.f32(float %a, float %b) nounwind
-; CHECK-NEXT: ret
+; CHECKNEXT: ret
   ret float %fmulx.i
 }
 
 define double @fmulxd(double %a, double %b) nounwind {
 ; CHECK-LABEL: fmulxd:
-; CHECK-NEXT: fmulx d0, d0, d1
+; CHECKNEXT: fmulx d0, d0, d1
   %fmulx.i = tail call double @llvm.arm64.neon.fmulx.f64(double %a, double %b) nounwind
-; CHECK-NEXT: ret
+; CHECKNEXT: ret
   ret double %fmulx.i
 }
 
 define float @fmulxs_lane(float %a, <4 x float> %vec) nounwind {
 ; CHECK-LABEL: fmulxs_lane:
-; CHECK-NEXT: fmulx.s s0, s0, v1[3]
+; CHECKNEXT: fmulx.s s0, s0, v1[3]
   %b = extractelement <4 x float> %vec, i32 3
   %fmulx.i = tail call float @llvm.arm64.neon.fmulx.f32(float %a, float %b) nounwind
-; CHECK-NEXT: ret
+; CHECKNEXT: ret
   ret float %fmulx.i
 }
 
 define double @fmulxd_lane(double %a, <2 x double> %vec) nounwind {
 ; CHECK-LABEL: fmulxd_lane:
-; CHECK-NEXT: fmulx d0, d0, v1[1]
+; CHECKNEXT: fmulx d0, d0, v1[1]
   %b = extractelement <2 x double> %vec, i32 1
   %fmulx.i = tail call double @llvm.arm64.neon.fmulx.f64(double %a, double %b) nounwind
-; CHECK-NEXT: ret
+; CHECKNEXT: ret
   ret double %fmulx.i
 }
 