LSV: Fix incorrectly increasing alignment

If the unaligned access has a dynamic offset, it may be odd, which would make
the adjusted alignment incorrect to use.

llvm-svn: 281110
parent 7a01c4cc95
commit e90cd7a703
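As a concrete illustration of the problem, here is a small standalone sketch
(plain C++, not LLVM code; the address constant and the accessAddr helper are
invented for the example): raising the alloca's declared alignment tells us
nothing about an access at a dynamic element offset into it, because that
offset may be odd.

#include <cassert>
#include <cstdint>

int main() {
  // Model of a [128 x i8] stack object whose declared alignment the old code
  // would bump to 4 ("we control its alignment, so we can cheat").
  const std::uint64_t allocaAddr = 0x1000;

  // Address of alloca[offset] for i8 elements; the vectorizer only sees the
  // offset as a runtime value, so it cannot rule out odd offsets.
  auto accessAddr = [&](std::uint64_t offset) { return allocaAddr + offset; };

  assert(accessAddr(0) % 4 == 0); // the case the old code implicitly assumed
  assert(accessAddr(1) % 4 == 1); // odd offset: not even 2-byte aligned
  assert(accessAddr(2) % 4 == 2); // still not 4-byte aligned
  return 0;
}

Emitting a <2 x i8> access tagged "align 4" at such a pointer would be a
miscompile whenever the offset is not a multiple of 4, which is what the
change below avoids.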
--- a/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -31,6 +31,7 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Utils/Local.h"
 #include "llvm/Transforms/Vectorize.h"
 
 using namespace llvm;
@@ -742,7 +743,8 @@ bool Vectorizer::vectorizeStoreChain(
 
   // Store size should be 1B, 2B or multiple of 4B.
   // TODO: Target hook for size constraint?
-  unsigned SzInBytes = (Sz / 8) * ChainSize;
+  unsigned EltSzInBytes = Sz / 8;
+  unsigned SzInBytes = EltSzInBytes * ChainSize;
   if (SzInBytes > 2 && SzInBytes % 4 != 0) {
     DEBUG(dbgs() << "LSV: Size should be 1B, 2B "
                     "or multiple of 4B. Splitting.\n");
@@ -790,15 +792,11 @@ bool Vectorizer::vectorizeStoreChain(
     if (S0->getPointerAddressSpace() != 0)
       return false;
 
-    // If we're storing to an object on the stack, we control its alignment,
-    // so we can cheat and change it!
-    Value *V = GetUnderlyingObject(S0->getPointerOperand(), DL);
-    if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) {
-      AI->setAlignment(StackAdjustedAlignment);
-      Alignment = StackAdjustedAlignment;
-    } else {
-      return false;
-    }
+    unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(),
+                                                   StackAdjustedAlignment,
+                                                   DL, S0, nullptr, &DT);
+    if (NewAlign < StackAdjustedAlignment)
+      return false;
   }
 
   BasicBlock::iterator First, Last;
@@ -899,7 +897,8 @@ bool Vectorizer::vectorizeLoadChain(
 
   // Load size should be 1B, 2B or multiple of 4B.
   // TODO: Should size constraint be a target hook?
-  unsigned SzInBytes = (Sz / 8) * ChainSize;
+  unsigned EltSzInBytes = Sz / 8;
+  unsigned SzInBytes = EltSzInBytes * ChainSize;
   if (SzInBytes > 2 && SzInBytes % 4 != 0) {
     DEBUG(dbgs() << "LSV: Size should be 1B, 2B "
                     "or multiple of 4B. Splitting.\n");
@@ -940,15 +939,13 @@ bool Vectorizer::vectorizeLoadChain(
     if (L0->getPointerAddressSpace() != 0)
       return false;
 
-    // If we're loading from an object on the stack, we control its alignment,
-    // so we can cheat and change it!
-    Value *V = GetUnderlyingObject(L0->getPointerOperand(), DL);
-    if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) {
-      AI->setAlignment(StackAdjustedAlignment);
-      Alignment = StackAdjustedAlignment;
-    } else {
-      return false;
-    }
+    unsigned NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(),
+                                                   StackAdjustedAlignment,
+                                                   DL, L0, nullptr, &DT);
+    if (NewAlign < StackAdjustedAlignment)
+      return false;
+
+    Alignment = NewAlign;
   }
 
   DEBUG({
@@ -1029,6 +1026,7 @@ bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
                                     unsigned Alignment) {
   if (Alignment % SzInBytes == 0)
     return false;
+
   bool Fast = false;
   bool Allows = TTI.allowsMisalignedMemoryAccesses(F.getParent()->getContext(),
                                                    SzInBytes * 8, AddressSpace,
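The AMDGPU test added below pins down the new behaviour. As a rough model of
the decision the patched code now makes (a sketch only: provableAlign, the gcd
rule, and the case table are assumptions for illustration, not the real
getOrEnforceKnownAlignment), the alignment that can be proven for
"alloca + offset * eltsize" with an unknown offset is limited by the element
stride, and vectorization only proceeds when that provable alignment reaches
the stack-adjusted alignment of 4:

#include <cstdio>
#include <numeric> // std::gcd (C++17)

// For power-of-two alignments and element sizes, an unknown element offset
// leaves gcd(allocaAlign, eltSize) as the best provable alignment.
static unsigned provableAlign(unsigned allocaAlign, unsigned eltSize) {
  return static_cast<unsigned>(std::gcd(allocaAlign, eltSize));
}

int main() {
  const unsigned StackAdjustedAlignment = 4;
  struct Case { const char *name; unsigned allocaAlign, eltSize; } cases[] = {
      {"i8 chain,  alloca align 1 ", 1, 1},
      {"i16 chain, alloca align 1 ", 1, 2},
      {"i32 chain, alloca align 1 ", 1, 4},
      {"i32 chain, alloca align 16", 16, 4},
  };
  for (const Case &C : cases) {
    unsigned A = provableAlign(C.allocaAlign, C.eltSize);
    // Mirrors "if (NewAlign < StackAdjustedAlignment) return false;" above.
    std::printf("%s -> provable align %u -> %s\n", C.name, A,
                A >= StackAdjustedAlignment ? "vectorize at align 4"
                                            : "leave the chain scalar");
  }
  return 0;
}

Only the 16-byte-aligned i32 alloca clears the bar, which matches the ALIGNED
checks in the test; the UNALIGNED run still vectorizes everything at align 1
because accessIsMisaligned defers to TTI.allowsMisalignedMemoryAccesses for
that subtarget.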
--- /dev/null
+++ b/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
@@ -0,0 +1,129 @@
+; RUN: opt -S -load-store-vectorizer -mattr=-unaligned-buffer-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=ALIGNED -check-prefix=ALL %s
+; RUN: opt -S -load-store-vectorizer -mattr=+unaligned-buffer-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=UNALIGNED -check-prefix=ALL %s
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+target triple = "amdgcn--"
+
+; ALL-LABEL: @load_unknown_offset_align1_i8(
+; ALL: alloca [128 x i8], align 1
+; UNALIGNED: load <2 x i8>, <2 x i8>* %{{[0-9]+}}, align 1{{$}}
+
+; ALIGNED: load i8, i8* %ptr0, align 1{{$}}
+; ALIGNED: load i8, i8* %ptr1, align 1{{$}}
+define void @load_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 {
+  %alloca = alloca [128 x i8], align 1
+  %ptr0 = getelementptr inbounds [128 x i8], [128 x i8]* %alloca, i32 0, i32 %offset
+  %val0 = load i8, i8* %ptr0, align 1
+  %ptr1 = getelementptr inbounds i8, i8* %ptr0, i32 1
+  %val1 = load i8, i8* %ptr1, align 1
+  %add = add i8 %val0, %val1
+  store i8 %add, i8 addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: @load_unknown_offset_align1_i16(
+; ALL: alloca [128 x i16], align 1{{$}}
+; UNALIGNED: load <2 x i16>, <2 x i16>* %{{[0-9]+}}, align 1{{$}}
+
+; ALIGNED: load i16, i16* %ptr0, align 1{{$}}
+; ALIGNED: load i16, i16* %ptr1, align 1{{$}}
+define void @load_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 {
+  %alloca = alloca [128 x i16], align 1
+  %ptr0 = getelementptr inbounds [128 x i16], [128 x i16]* %alloca, i32 0, i32 %offset
+  %val0 = load i16, i16* %ptr0, align 1
+  %ptr1 = getelementptr inbounds i16, i16* %ptr0, i32 1
+  %val1 = load i16, i16* %ptr1, align 1
+  %add = add i16 %val0, %val1
+  store i16 %add, i16 addrspace(1)* %out
+  ret void
+}
+
+; FIXME: Although the offset is unknown here, we know it is a multiple
+; of the element size, so should still be align 4
+
+; ALL-LABEL: @load_unknown_offset_align1_i32(
+; ALL: alloca [128 x i32], align 1
+; UNALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 1{{$}}
+
+; ALIGNED: load i32, i32* %ptr0, align 1
+; ALIGNED: load i32, i32* %ptr1, align 1
+define void @load_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
+  %alloca = alloca [128 x i32], align 1
+  %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
+  %val0 = load i32, i32* %ptr0, align 1
+  %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1
+  %val1 = load i32, i32* %ptr1, align 1
+  %add = add i32 %val0, %val1
+  store i32 %add, i32 addrspace(1)* %out
+  ret void
+}
+
+; FIXME: Should always increase alignment of the load
+; Make sure alloca alignment isn't decreased
+; ALL-LABEL: @load_alloca16_unknown_offset_align1_i32(
+; ALL: alloca [128 x i32], align 16
+
+; UNALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 1{{$}}
+; ALIGNED: load <2 x i32>, <2 x i32>* %{{[0-9]+}}, align 4{{$}}
+define void @load_alloca16_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
+  %alloca = alloca [128 x i32], align 16
+  %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
+  %val0 = load i32, i32* %ptr0, align 1
+  %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1
+  %val1 = load i32, i32* %ptr1, align 1
+  %add = add i32 %val0, %val1
+  store i32 %add, i32 addrspace(1)* %out
+  ret void
+}
+
+; ALL-LABEL: @store_unknown_offset_align1_i8(
+; ALL: alloca [128 x i8], align 1
+; UNALIGNED: store <2 x i8> <i8 9, i8 10>, <2 x i8>* %{{[0-9]+}}, align 1{{$}}
+
+; ALIGNED: store i8 9, i8* %ptr0, align 1{{$}}
+; ALIGNED: store i8 10, i8* %ptr1, align 1{{$}}
+define void @store_unknown_offset_align1_i8(i8 addrspace(1)* noalias %out, i32 %offset) #0 {
+  %alloca = alloca [128 x i8], align 1
+  %ptr0 = getelementptr inbounds [128 x i8], [128 x i8]* %alloca, i32 0, i32 %offset
+  store i8 9, i8* %ptr0, align 1
+  %ptr1 = getelementptr inbounds i8, i8* %ptr0, i32 1
+  store i8 10, i8* %ptr1, align 1
+  ret void
+}
+
+; ALL-LABEL: @store_unknown_offset_align1_i16(
+; ALL: alloca [128 x i16], align 1
+; UNALIGNED: store <2 x i16> <i16 9, i16 10>, <2 x i16>* %{{[0-9]+}}, align 1{{$}}
+
+; ALIGNED: store i16 9, i16* %ptr0, align 1{{$}}
+; ALIGNED: store i16 10, i16* %ptr1, align 1{{$}}
+define void @store_unknown_offset_align1_i16(i16 addrspace(1)* noalias %out, i32 %offset) #0 {
+  %alloca = alloca [128 x i16], align 1
+  %ptr0 = getelementptr inbounds [128 x i16], [128 x i16]* %alloca, i32 0, i32 %offset
+  store i16 9, i16* %ptr0, align 1
+  %ptr1 = getelementptr inbounds i16, i16* %ptr0, i32 1
+  store i16 10, i16* %ptr1, align 1
+  ret void
+}
+
+; FIXME: Although the offset is unknown here, we know it is a multiple
+; of the element size, so it still should be align 4.
+
+; ALL-LABEL: @store_unknown_offset_align1_i32(
+; ALL: alloca [128 x i32], align 1
+
+; UNALIGNED: store <2 x i32> <i32 9, i32 10>, <2 x i32>* %{{[0-9]+}}, align 1{{$}}
+
+; ALIGNED: store i32 9, i32* %ptr0, align 1
+; ALIGNED: store i32 10, i32* %ptr1, align 1
+define void @store_unknown_offset_align1_i32(i32 addrspace(1)* noalias %out, i32 %offset) #0 {
+  %alloca = alloca [128 x i32], align 1
+  %ptr0 = getelementptr inbounds [128 x i32], [128 x i32]* %alloca, i32 0, i32 %offset
+  store i32 9, i32* %ptr0, align 1
+  %ptr1 = getelementptr inbounds i32, i32* %ptr0, i32 1
+  store i32 10, i32* %ptr1, align 1
+  ret void
+}
+
+attributes #0 = { nounwind }