mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-23 19:23:23 +01:00
ab043ff680
Essentially the same as the GEP change in r230786. A similar migration script can be used to update test cases, though a few more test case improvements/changes were required this time around: (r229269-r229278) import fileinput import sys import re pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)") for line in sys.stdin: sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line)) Reviewers: rafael, dexonsmith, grosser Differential Revision: http://reviews.llvm.org/D7649 llvm-svn: 230794
42 lines
1.9 KiB
LLVM
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; RUN: opt < %s -bb-vectorize -bb-vectorize-req-chain-depth=3 -bb-vectorize-ignore-target-info -instcombine -gvn -S | FileCheck %s

; Two independent scalar chains (over %a/%b/%c and their +1 neighbors) of
; fmul/fadd depth >= 3. BBVectorize should fuse them into <2 x double>
; operations; the trailing fmul of %add11 with %add15 forces extraction of
; both lanes, which the CHECK lines below verify.
define double @test1(double* %a, double* %b, double* %c) nounwind uwtable readonly {
entry:
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %i2 = load double, double* %c, align 8
  %add = fadd double %mul, %i2
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  %arrayidx6 = getelementptr inbounds double, double* %c, i64 1
  %i5 = load double, double* %arrayidx6, align 8
  %add7 = fadd double %mul5, %i5
  %mul9 = fmul double %add, %i1
  %add11 = fadd double %mul9, %i2
  %mul13 = fmul double %add7, %i4
  %add15 = fadd double %mul13, %i5
  %mul16 = fmul double %add11, %add15
  ret double %mul16
; CHECK-LABEL: @test1(
; CHECK: %i0.v.i0 = bitcast double* %a to <2 x double>*
; CHECK: %i1.v.i0 = bitcast double* %b to <2 x double>*
; CHECK: %i2.v.i0 = bitcast double* %c to <2 x double>*
; CHECK: %i0 = load <2 x double>, <2 x double>* %i0.v.i0, align 8
; CHECK: %i1 = load <2 x double>, <2 x double>* %i1.v.i0, align 8
; CHECK: %mul = fmul <2 x double> %i0, %i1
; CHECK: %i2 = load <2 x double>, <2 x double>* %i2.v.i0, align 8
; CHECK: %add = fadd <2 x double> %mul, %i2
; CHECK: %mul9 = fmul <2 x double> %add, %i1
; CHECK: %add11 = fadd <2 x double> %mul9, %i2
; CHECK: %add11.v.r1 = extractelement <2 x double> %add11, i32 0
; CHECK: %add11.v.r2 = extractelement <2 x double> %add11, i32 1
; CHECK: %mul16 = fmul double %add11.v.r1, %add11.v.r2
; CHECK: ret double %mul16
}