ARM64: add support for AArch64's addsub_ext.ll
There was one definite issue in ARM64 (the off-by-1 check for whether a shift could be folded in) and one difference that is probably correct: ARM64 didn't fold nodes with multiple uses into the arithmetic operations unless optimising for code size.

llvm-svn: 206168
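To see the off-by-1 concretely: AArch64's add/sub (extended register) forms permit a left shift of 0-4, but the old mask test only accepted 0-3. A minimal standalone sketch (plain C++, not backend code; the helper names are made up) comparing the two checks:

#include <cstdio>

// Old check: a shift survives only if its low two bits reproduce the
// whole value, so 0-3 pass and the architecturally legal 4 is rejected.
static bool oldCheckRejects(unsigned ShiftVal) {
  return (ShiftVal & 0x3) != ShiftVal;
}

// New check: reject only shifts beyond the architectural maximum of 4.
static bool newCheckRejects(unsigned ShiftVal) {
  return ShiftVal > 4;
}

int main() {
  for (unsigned ShiftVal = 0; ShiftVal <= 5; ++ShiftVal)
    std::printf("shift %u: old=%s new=%s\n", ShiftVal,
                oldCheckRejects(ShiftVal) ? "reject" : "fold",
                newCheckRejects(ShiftVal) ? "reject" : "fold");
  return 0;
}

The two predicates disagree only at ShiftVal == 4, which the ISA allows and the old code refused to fold.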
parent 614708ff8e
commit 0f5179b30d
lib/Target/ARM64/ARM64ISelDAGToDAG.cpp

@@ -532,7 +532,7 @@ bool ARM64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
     if (!CSD)
       return false;
     ShiftVal = CSD->getZExtValue();
-    if ((ShiftVal & 0x3) != ShiftVal)
+    if (ShiftVal > 4)
       return false;
 
     Ext = getExtendTypeForNode(N.getOperand(0));
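The commit message's second point, the multiple-uses behaviour, is what the minsize attributes added to the test functions below are for. A rough sketch of that policy, using hypothetical names (Node, shouldFoldExtend, ForCodeSize) rather than the backend's real API:

// Illustrative only -- not the actual ARM64 backend API. An extend node
// feeding an add/sub is folded when it has a single use (the fold is
// free), or regardless of use count when optimising for code size.
struct Node {
  unsigned NumUses = 0;
  bool hasOneUse() const { return NumUses == 1; }
};

static bool shouldFoldExtend(const Node &N, bool ForCodeSize) {
  return N.hasOneUse() || ForCodeSize;
}

Marking the test functions minsize presumably puts both triples on the folding path, so the shared CHECK lines hold for each.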
test/CodeGen/AArch64/addsub_ext.ll

@@ -1,11 +1,12 @@
 ; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64 | FileCheck %s
 
 @var8 = global i8 0
 @var16 = global i16 0
 @var32 = global i32 0
 @var64 = global i64 0
 
-define void @addsub_i8rhs() {
+define void @addsub_i8rhs() minsize {
 ; CHECK-LABEL: addsub_i8rhs:
   %val8_tmp = load i8* @var8
   %lhs32 = load i32* @var32
@@ -80,7 +81,7 @@ end:
   ret void
 }
 
-define void @addsub_i16rhs() {
+define void @addsub_i16rhs() minsize {
 ; CHECK-LABEL: addsub_i16rhs:
   %val16_tmp = load i16* @var16
   %lhs32 = load i32* @var32
@@ -158,7 +159,7 @@ end:
 ; N.b. we could probably check more here ("add w2, w3, w1, uxtw" for
 ; example), but the remaining instructions are probably not idiomatic
 ; in the face of "add/sub (shifted register)" so I don't intend to.
-define void @addsub_i32rhs() {
+define void @addsub_i32rhs() minsize {
 ; CHECK-LABEL: addsub_i32rhs:
   %val32_tmp = load i32* @var32
   %lhs64 = load i64* @var64