mirror of https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-23 19:23:23 +01:00
commit aea541125e

[Commit message; first sentence truncated in extraction — it concerns fixing zero-alignment handling for] memory intrinsics in the SDAG builder. When alignment is zero, the lang ref says that *no* alignment assumptions can be made. This is the exact opposite of the internal API contracts of the DAG where alignment 0 indicates that the alignment can be made to be anything desired. There is another, more explicit alignment that is better suited for the role of "no alignment at all": an alignment of 1. Map the intrinsic alignment to this early so that we don't end up generating aligned DAGs. It is really terrifying that we've never seen this before, but we suddenly started generating a large number of alignment 0 memcpys due to the new code to do memcpy-based copying of POD class members. That patch contains a bug that rounds bitfield alignments down when they are the first field. This can in turn produce zero alignments. This fixes weird crashes I've seen in library users of LLVM on 32-bit hosts, etc. llvm-svn: 176022

56 lines
1.5 KiB
LLVM
; Memset lowering test: the same IR is compiled for three CPUs with different
; vector capabilities so FileCheck can verify scalar (X86), SSE (XMM), and
; AVX (YMM) expansions of @llvm.memset.
; RUN: llc < %s -march=x86 -mcpu=pentium2 -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -march=x86 -mcpu=pentium3 -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=XMM
; RUN: llc < %s -march=x86 -mcpu=bdver1 -mtriple=i686-apple-darwin8.8.0 | FileCheck %s --check-prefix=YMM

; 4-byte struct; an [8 x %struct.x] array is therefore 32 bytes.
%struct.x = type { i16, i16 }
; A 32-byte memset of a stack array with alignment 8. Each target should use
; the widest stores it has available.
define void @t() nounwind {
entry:
  %up_mvd = alloca [8 x %struct.x]          ; <[8 x %struct.x]*> [#uses=2]
  %up_mvd116 = getelementptr [8 x %struct.x]* %up_mvd, i32 0, i32 0 ; <%struct.x*> [#uses=1]
  %tmp110117 = bitcast [8 x %struct.x]* %up_mvd to i8* ; <i8*> [#uses=1]

  call void @llvm.memset.p0i8.i64(i8* %tmp110117, i8 0, i64 32, i32 8, i1 false)
; No SSE: exactly eight 4-byte zero stores (8 * 4 = 32 bytes).
; X86: movl $0,
; X86: movl $0,
; X86: movl $0,
; X86: movl $0,
; X86: movl $0,
; X86: movl $0,
; X86: movl $0,
; X86: movl $0,
; X86-NOT: movl $0,
; X86: ret

; SSE: zero an xmm register once, store it twice (2 * 16 = 32 bytes).
; XMM: xorps %xmm{{[0-9]+}}, [[Z:%xmm[0-9]+]]
; XMM: movaps [[Z]],
; XMM: movaps [[Z]],
; XMM-NOT: movaps
; XMM: ret

; AVX: a single 32-byte ymm store covers the whole array.
; YMM: vxorps %ymm{{[0-9]+}}, %ymm{{[0-9]+}}, [[Z:%ymm[0-9]+]]
; YMM: vmovaps [[Z]],
; YMM-NOT: movaps
; YMM: ret

  ; Keep %up_mvd alive past the memset so the stores are not dead.
  call void @foo( %struct.x* %up_mvd116 ) nounwind
  ret void
}
declare void @foo(%struct.x*)

; Old-style (pre-LLVM-3.7) memset intrinsic: the i32 4th argument is the
; alignment, the i1 5th argument is the volatile flag.
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
define void @PR15348(i8* %a) {
; Ensure that alignment of '0' in an @llvm.memset intrinsic results in
; unaligned loads and stores.
; XMM: PR15348
; XMM: movb $0,
; XMM: movl $0,
; XMM: movl $0,
; XMM: movl $0,
; XMM: movl $0,
  ; 17-byte memset with alignment 0: must not be assumed aligned
  ; (zero alignment must be treated as alignment 1, per the LangRef).
  call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 17, i32 0, i1 false)
  ret void
}