diff --git a/test/CodeGen/SystemZ/buildvector-00.ll b/test/CodeGen/SystemZ/buildvector-00.ll
index 463da0603ee..13360b2e577 100644
--- a/test/CodeGen/SystemZ/buildvector-00.ll
+++ b/test/CodeGen/SystemZ/buildvector-00.ll
@@ -4,12 +4,12 @@
 ; Test that the dag combiner can understand that some vector operands are
 ; all-zeros and then optimize the logical operations.
 
-define void @f1() {
+define void @f1(<2 x i64> %a0) {
 ; CHECK-LABEL: f1:
 ; CHECK: # %bb.0: # %bb
 ; CHECK-NEXT: vlrepg %v0, 0(%r1)
 ; CHECK-NEXT: vgbm %v1, 0
-; CHECK-NEXT: vceqg %v2, %v0, %v1
+; CHECK-NEXT: vceqg %v2, %v24, %v1
 ; CHECK-NEXT: vn %v0, %v0, %v0
 ; CHECK-NEXT: vno %v2, %v2, %v2
 ; CHECK-NEXT: vceqg %v0, %v0, %v1
@@ -26,13 +26,13 @@ bb:
 bb1:                                              ; preds = %bb
   %tmp2 = load i64, i64* undef, align 8
   %tmp3 = insertelement <2 x i64> undef, i64 %tmp2, i32 1
-  %tmp4 = icmp ne <2 x i64> undef, zeroinitializer
+  %tmp4 = icmp ne <2 x i64> %a0, zeroinitializer
   %tmp5 = xor <2 x i1> %tmp4, zeroinitializer
   %tmp6 = xor <2 x i1> zeroinitializer, %tmp5
   %tmp7 = and <2 x i64> %tmp3, %tmp
   %tmp8 = icmp ne <2 x i64> %tmp7, zeroinitializer
   %tmp9 = xor <2 x i1> zeroinitializer, %tmp8
-  %tmp10 = icmp ne <2 x i64> undef, zeroinitializer
+  %tmp10 = icmp ne <2 x i64> %a0, zeroinitializer
   %tmp11 = xor <2 x i1> %tmp10, %tmp9
   %tmp12 = and <2 x i1> %tmp6, %tmp11
   %tmp13 = extractelement <2 x i1> %tmp12, i32 0
diff --git a/test/CodeGen/SystemZ/dag-combine-03.ll b/test/CodeGen/SystemZ/dag-combine-03.ll
index 379d53ab5c4..3625ac68b32 100644
--- a/test/CodeGen/SystemZ/dag-combine-03.ll
+++ b/test/CodeGen/SystemZ/dag-combine-03.ll
@@ -10,7 +10,7 @@
 ; handled so that the AND is removed. If this succeeds, this results in a CHI
 ; instead of TMLL.
 
-define void @fun() {
+define void @fun(i64 %a0) {
 ; CHECK-LABEL: fun:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: lghi %r1, 0
@@ -18,13 +18,13 @@ define void @fun() {
 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: la %r0, 2(%r1)
 ; CHECK-NEXT: la %r1, 1(%r1)
-; CHECK-NEXT: cgr %r1, %r0
-; CHECK-NEXT: lhi %r2, 0
-; CHECK-NEXT: lochie %r2, 1
-; CHECK-NEXT: cgr %r0, %r0
+; CHECK-NEXT: cgr %r1, %r2
+; CHECK-NEXT: lhi %r3, 0
+; CHECK-NEXT: lochie %r3, 1
+; CHECK-NEXT: cgr %r0, %r2
 ; CHECK-NEXT: lhi %r0, 0
 ; CHECK-NEXT: lochie %r0, 1
-; CHECK-NEXT: vlvgp %v0, %r2, %r2
+; CHECK-NEXT: vlvgp %v0, %r3, %r3
 ; CHECK-NEXT: vlvgp %v1, %r0, %r0
 ; CHECK-NEXT: vx %v0, %v0, %v1
 ; CHECK-NEXT: vlgvf %r0, %v0, 1
@@ -38,8 +38,8 @@ lab0:
   %phi = phi i64 [ %sel, %lab0 ], [ 0, %entry ]
   %add = add nuw nsw i64 %phi, 1
   %add2 = add nuw nsw i64 %phi, 2
-  %cmp = icmp eq i64 %add, undef
-  %cmp2 = icmp eq i64 %add2, undef
+  %cmp = icmp eq i64 %add, %a0
+  %cmp2 = icmp eq i64 %add2, %a0
   %ins = insertelement <2 x i1> undef, i1 %cmp, i32 0
   %ins2 = insertelement <2 x i1> undef, i1 %cmp2, i32 0
   %xor = xor <2 x i1> %ins, %ins2