GlobalISel: Preserve memory type when reducing load/store width
This commit is contained in:
parent 82a59d63ea
commit 59f5d684fb
@@ -3910,14 +3910,13 @@ LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
   unsigned PartSize = PartTy.getSizeInBits();
   for (unsigned Idx = 0, E = NumParts; Idx != E && Offset < TotalSize;
        Offset += PartSize, ++Idx) {
-    unsigned ByteSize = PartSize / 8;
     unsigned ByteOffset = Offset / 8;
     Register NewAddrReg;

     MIRBuilder.materializePtrAdd(NewAddrReg, AddrReg, OffsetTy, ByteOffset);

     MachineMemOperand *NewMMO =
-        MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
+        MF.getMachineMemOperand(MMO, ByteOffset, PartTy);

     if (IsLoad) {
       Register Dst = MRI.createGenericVirtualRegister(PartTy);
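The functional change is the pair of lines at the end of the hunk above: when reduceLoadStoreWidth splits a wide load or store, each part's MachineMemOperand is now built from the part's LLT (PartTy, e.g. <2 x s64>) instead of from a raw byte size, which is why the test expectations below change from sizes such as (s128) or (s512) to the corresponding vector or pointer types. What follows is a minimal, self-contained C++ sketch, not LLVM code, that models why carrying the type is more informative than carrying only the size; the ToyLLT struct and the printed strings are illustrative stand-ins, not real LLVM APIs.

// Toy model of the idea: describe each split-off piece of a wide store by its
// full low-level type (element count + element width) rather than by a plain
// bit size, so the printed memory operand keeps the vector shape.
#include <iostream>
#include <string>

struct ToyLLT {               // hypothetical stand-in for LLVM's LLT
  unsigned NumElements;       // 1 means scalar
  unsigned ScalarBits;
  unsigned sizeInBits() const { return NumElements * ScalarBits; }
  std::string str() const {
    if (NumElements == 1)
      return "s" + std::to_string(ScalarBits);
    return "<" + std::to_string(NumElements) + " x s" +
           std::to_string(ScalarBits) + ">";
  }
};

int main() {
  ToyLLT Wide{4, 64};                    // a <4 x s64> value, 256 bits
  ToyLLT Part{2, 64};                    // legalized into <2 x s64> pieces
  unsigned PartSize = Part.sizeInBits(); // 128
  for (unsigned Offset = 0; Offset < Wide.sizeInBits(); Offset += PartSize) {
    unsigned ByteOffset = Offset / 8;
    // Old behaviour: only PartSize / 8 bytes were recorded, printing as
    // "store (s128) ...". New behaviour: the part type itself is recorded,
    // printing as "store (<2 x s64>) ...".
    std::cout << "store (" << Part.str() << ") into base + " << ByteOffset
              << "\n";
  }
}

Keeping the element count and element width visible on the narrowed memory operands means later passes and the MIR printer see <2 x s64> rather than an opaque s128, which is exactly the difference the updated CHECK lines below assert.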
@@ -32,7 +32,7 @@ define i128 @ABIi128(i128 %arg1) {
ret i128 %res
}

-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(<3 x s32>), %4:_(p0) :: (store (s96) into %ir.addr + 16, align 16, basealign 32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(<3 x s32>), %4:_(p0) :: (store (<3 x s32>) into %ir.addr + 16, align 16, basealign 32) (in function: odd_vector)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
define void @odd_vector(<7 x i32>* %addr) {
@@ -179,10 +179,10 @@ body: |
; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
; CHECK: %idx:_(s64) = COPY $x0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
-; CHECK: G_STORE [[COPY]](<2 x s64>), [[FRAME_INDEX]](p0) :: (store (s128) into %stack.0, align 32)
+; CHECK: G_STORE [[COPY]](<2 x s64>), [[FRAME_INDEX]](p0) :: (store (<2 x s64>) into %stack.0, align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
-; CHECK: G_STORE [[COPY1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into %stack.0 + 16, basealign 32)
+; CHECK: G_STORE [[COPY1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into %stack.0 + 16, basealign 32)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %idx, [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
@@ -19,10 +19,10 @@ body: |
; CHECK: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
; CHECK: [[FPEXT:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV]](<2 x s32>)
; CHECK: [[FPEXT1:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV1]](<2 x s32>)
-; CHECK: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store (s128), align 32)
+; CHECK: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-; CHECK: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%0:_(<4 x s32>) = COPY $q0
%1:_(p0) = COPY $x0
@@ -117,10 +117,10 @@ body: |
; CHECK: [[COPY5:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[FPTRUNC]](<2 x s32>), [[FPTRUNC1]](<2 x s32>)
; CHECK: [[CONCAT_VECTORS1:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[FPTRUNC2]](<2 x s32>), [[FPTRUNC3]](<2 x s32>)
-; CHECK: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY5]](p0) :: (store (s128), align 32)
+; CHECK: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY5]](p0) :: (store (<4 x s32>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C]](s64)
-; CHECK: G_STORE [[CONCAT_VECTORS1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[CONCAT_VECTORS1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%2:_(<2 x s64>) = COPY $q0
%3:_(<2 x s64>) = COPY $q1
@@ -316,10 +316,10 @@ body: |
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
-; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s8>), %ptr(p0) :: (store (s128), align 32)
+; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s8>), %ptr(p0) :: (store (<16 x s8>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[BUILD_VECTOR1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[BUILD_VECTOR1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%val:_(<32 x s8>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
@@ -340,10 +340,10 @@ body: |
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
-; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s16>), %ptr(p0) :: (store (s128), align 32)
+; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s16>), %ptr(p0) :: (store (<8 x s16>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[BUILD_VECTOR1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[BUILD_VECTOR1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%val:_(<16 x s16>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
@@ -364,10 +364,10 @@ body: |
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
-; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), %ptr(p0) :: (store (s128), align 32)
+; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), %ptr(p0) :: (store (<4 x s32>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%val:_(<8 x s32>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
@@ -386,10 +386,10 @@ body: |
; CHECK: liveins: $x0
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
; CHECK: %ptr:_(p0) = COPY $x0
-; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (s128), align 32)
+; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%val:_(<4 x s64>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
@@ -407,13 +407,13 @@ body: |
; CHECK-LABEL: name: load_32xs8
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
-; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD %ptr(p0) :: (load (s128), align 32)
+; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD %ptr(p0) :: (load (<16 x s8>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
-; CHECK: G_STORE [[LOAD]](<16 x s8>), %ptr(p0) :: (store (s128), align 32)
+; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[PTR_ADD]](p0) :: (load (<16 x s8>) from unknown-address + 16)
+; CHECK: G_STORE [[LOAD]](<16 x s8>), %ptr(p0) :: (store (<16 x s8>), align 32)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[LOAD1]](<16 x s8>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[LOAD1]](<16 x s8>), [[PTR_ADD1]](p0) :: (store (<16 x s8>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<32 x s8>) = G_LOAD %ptr(p0) :: (load (<32 x s8>))
@@ -431,13 +431,13 @@ body: |
; CHECK-LABEL: name: load_16xs16
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
-; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD %ptr(p0) :: (load (s128), align 32)
+; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD %ptr(p0) :: (load (<8 x s16>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
-; CHECK: G_STORE [[LOAD]](<8 x s16>), %ptr(p0) :: (store (s128), align 32)
+; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (<8 x s16>) from unknown-address + 16)
+; CHECK: G_STORE [[LOAD]](<8 x s16>), %ptr(p0) :: (store (<8 x s16>), align 32)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[LOAD1]](<8 x s16>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[LOAD1]](<8 x s16>), [[PTR_ADD1]](p0) :: (store (<8 x s16>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<16 x s16>) = G_LOAD %ptr(p0) :: (load (<16 x s16>))
@@ -455,13 +455,13 @@ body: |
; CHECK-LABEL: name: load_8xs32
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
-; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (s128), align 32)
+; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
-; CHECK: G_STORE [[LOAD]](<4 x s32>), %ptr(p0) :: (store (s128), align 32)
+; CHECK: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
+; CHECK: G_STORE [[LOAD]](<4 x s32>), %ptr(p0) :: (store (<4 x s32>), align 32)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[LOAD1]](<4 x s32>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[LOAD1]](<4 x s32>), [[PTR_ADD1]](p0) :: (store (<4 x s32>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<8 x s32>) = G_LOAD %ptr(p0) :: (load (<8 x s32>))
@@ -479,13 +479,13 @@ body: |
; CHECK-LABEL: name: load_4xs64
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
-; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (s128), align 32)
+; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (<2 x s64>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
-; CHECK: G_STORE [[LOAD]](<2 x s64>), %ptr(p0) :: (store (s128), align 32)
+; CHECK: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
+; CHECK: G_STORE [[LOAD]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>), align 32)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[LOAD1]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[LOAD1]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<4 x s64>) = G_LOAD %ptr(p0) :: (load (<4 x s64>))
@@ -527,13 +527,13 @@ body: |
; CHECK: liveins: $x0
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
; CHECK: %ptr:_(p0) = COPY $x0
-; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (s128))
+; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (<2 x s64>))
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
-; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
-; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 32)
+; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (<2 x s64>) into unknown-address + 32)
; CHECK: RET_ReallyLR
%val:_(<6 x s64>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
@@ -153,10 +153,10 @@ body: |
; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SHUF:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY1]](<2 x s64>), [[COPY2]], shufflemask(1, 2)
; CHECK: [[SHUF1:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY3]](<2 x s64>), [[COPY]], shufflemask(1, 2)
-; CHECK: G_STORE [[SHUF]](<2 x s64>), [[COPY4]](p0) :: (store (s128), align 32)
+; CHECK: G_STORE [[SHUF]](<2 x s64>), [[COPY4]](p0) :: (store (<2 x s64>), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s64)
-; CHECK: G_STORE [[SHUF1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[SHUF1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%3:_(<2 x s64>) = COPY $q0
%4:_(<2 x s64>) = COPY $q1
@@ -195,10 +195,10 @@ body: |
; CHECK: [[EVEC3:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY3]](<4 x s32>), [[C3]](s64)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[EVEC]](s32), [[EVEC1]](s32), [[EVEC2]](s32), [[EVEC3]](s32)
; CHECK: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[COPY1]](<4 x s32>), [[COPY]], shufflemask(2, 6, 5, 3)
-; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](p0) :: (store (s128), align 32)
+; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](p0) :: (store (<4 x s32>), align 32)
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C4]](s64)
-; CHECK: G_STORE [[SHUF]](<4 x s32>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
+; CHECK: G_STORE [[SHUF]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
; CHECK: RET_ReallyLR
%3:_(<4 x s32>) = COPY $q0
%4:_(<4 x s32>) = COPY $q1
@@ -1398,16 +1398,16 @@ body: |

; CHECK-LABEL: name: extract_vector_elt_7_v64s32
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
-; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (s512), align 4, addrspace 4)
+; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x s32>), align 4, addrspace 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s512) from unknown-address + 64, align 4, addrspace 4)
+; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 64, align 4, addrspace 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s512) from unknown-address + 128, align 4, addrspace 4)
+; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s512) from unknown-address + 192, align 4, addrspace 4)
+; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x s32>) from unknown-address + 192, align 4, addrspace 4)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](<16 x s32>), 224
; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](s32)
%0:_(p1) = COPY $sgpr0_sgpr1
@@ -1426,16 +1426,16 @@ body: |

; CHECK-LABEL: name: extract_vector_elt_33_v64s32
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
-; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (s512), align 4, addrspace 4)
+; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x s32>), align 4, addrspace 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s512) from unknown-address + 64, align 4, addrspace 4)
+; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 64, align 4, addrspace 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s512) from unknown-address + 128, align 4, addrspace 4)
+; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s512) from unknown-address + 192, align 4, addrspace 4)
+; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x s32>) from unknown-address + 192, align 4, addrspace 4)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](<16 x s32>), 32
; CHECK: S_ENDPGM 0, implicit [[EXTRACT]](s32)
%0:_(p1) = COPY $sgpr0_sgpr1
@ -1476,216 +1476,216 @@ body: |
|
||||
|
||||
; CHECK-LABEL: name: extract_vector_elt_33_v64p3
|
||||
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (s512), align 4, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x p3>), align 4, addrspace 4)
|
||||
; CHECK: [[BITCAST:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD]](<16 x s32>)
|
||||
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s512) from unknown-address + 64, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x p3>) from unknown-address + 64, align 4, addrspace 4)
|
||||
; CHECK: [[BITCAST1:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD1]](<16 x s32>)
|
||||
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s512) from unknown-address + 128, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x p3>) from unknown-address + 128, align 4, addrspace 4)
|
||||
; CHECK: [[BITCAST2:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD2]](<16 x s32>)
|
||||
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s512) from unknown-address + 192, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x p3>) from unknown-address + 192, align 4, addrspace 4)
|
||||
; CHECK: [[BITCAST3:%[0-9]+]]:_(<16 x p3>) = G_BITCAST [[LOAD3]](<16 x s32>)
|
||||
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
|
||||
; CHECK: [[UV:%[0-9]+]]:_(p3), [[UV1:%[0-9]+]]:_(p3), [[UV2:%[0-9]+]]:_(p3), [[UV3:%[0-9]+]]:_(p3), [[UV4:%[0-9]+]]:_(p3), [[UV5:%[0-9]+]]:_(p3), [[UV6:%[0-9]+]]:_(p3), [[UV7:%[0-9]+]]:_(p3), [[UV8:%[0-9]+]]:_(p3), [[UV9:%[0-9]+]]:_(p3), [[UV10:%[0-9]+]]:_(p3), [[UV11:%[0-9]+]]:_(p3), [[UV12:%[0-9]+]]:_(p3), [[UV13:%[0-9]+]]:_(p3), [[UV14:%[0-9]+]]:_(p3), [[UV15:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST]](<16 x p3>)
|
||||
; CHECK: [[UV16:%[0-9]+]]:_(p3), [[UV17:%[0-9]+]]:_(p3), [[UV18:%[0-9]+]]:_(p3), [[UV19:%[0-9]+]]:_(p3), [[UV20:%[0-9]+]]:_(p3), [[UV21:%[0-9]+]]:_(p3), [[UV22:%[0-9]+]]:_(p3), [[UV23:%[0-9]+]]:_(p3), [[UV24:%[0-9]+]]:_(p3), [[UV25:%[0-9]+]]:_(p3), [[UV26:%[0-9]+]]:_(p3), [[UV27:%[0-9]+]]:_(p3), [[UV28:%[0-9]+]]:_(p3), [[UV29:%[0-9]+]]:_(p3), [[UV30:%[0-9]+]]:_(p3), [[UV31:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST1]](<16 x p3>)
|
||||
; CHECK: [[UV32:%[0-9]+]]:_(p3), [[UV33:%[0-9]+]]:_(p3), [[UV34:%[0-9]+]]:_(p3), [[UV35:%[0-9]+]]:_(p3), [[UV36:%[0-9]+]]:_(p3), [[UV37:%[0-9]+]]:_(p3), [[UV38:%[0-9]+]]:_(p3), [[UV39:%[0-9]+]]:_(p3), [[UV40:%[0-9]+]]:_(p3), [[UV41:%[0-9]+]]:_(p3), [[UV42:%[0-9]+]]:_(p3), [[UV43:%[0-9]+]]:_(p3), [[UV44:%[0-9]+]]:_(p3), [[UV45:%[0-9]+]]:_(p3), [[UV46:%[0-9]+]]:_(p3), [[UV47:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST2]](<16 x p3>)
|
||||
; CHECK: [[UV48:%[0-9]+]]:_(p3), [[UV49:%[0-9]+]]:_(p3), [[UV50:%[0-9]+]]:_(p3), [[UV51:%[0-9]+]]:_(p3), [[UV52:%[0-9]+]]:_(p3), [[UV53:%[0-9]+]]:_(p3), [[UV54:%[0-9]+]]:_(p3), [[UV55:%[0-9]+]]:_(p3), [[UV56:%[0-9]+]]:_(p3), [[UV57:%[0-9]+]]:_(p3), [[UV58:%[0-9]+]]:_(p3), [[UV59:%[0-9]+]]:_(p3), [[UV60:%[0-9]+]]:_(p3), [[UV61:%[0-9]+]]:_(p3), [[UV62:%[0-9]+]]:_(p3), [[UV63:%[0-9]+]]:_(p3) = G_UNMERGE_VALUES [[BITCAST3]](<16 x p3>)
|
||||
; CHECK: G_STORE [[UV]](p3), [[FRAME_INDEX]](p5) :: (store (s32) into %stack.0, align 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV]](p3), [[FRAME_INDEX]](p5) :: (store (p3) into %stack.0, align 256, addrspace 5)
|
||||
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
|
||||
; CHECK: G_STORE [[UV1]](p3), [[PTR_ADD3]](p5) :: (store (s32) into %stack.0 + 4, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV1]](p3), [[PTR_ADD3]](p5) :: (store (p3) into %stack.0 + 4, basealign 256, addrspace 5)
|
||||
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
|
||||
; CHECK: G_STORE [[UV2]](p3), [[PTR_ADD4]](p5) :: (store (s32) into %stack.0 + 8, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV2]](p3), [[PTR_ADD4]](p5) :: (store (p3) into %stack.0 + 8, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
|
||||
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
|
||||
; CHECK: G_STORE [[UV3]](p3), [[PTR_ADD5]](p5) :: (store (s32) into %stack.0 + 12, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV3]](p3), [[PTR_ADD5]](p5) :: (store (p3) into %stack.0 + 12, basealign 256, addrspace 5)
|
||||
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
|
||||
; CHECK: G_STORE [[UV4]](p3), [[PTR_ADD6]](p5) :: (store (s32) into %stack.0 + 16, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV4]](p3), [[PTR_ADD6]](p5) :: (store (p3) into %stack.0 + 16, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
|
||||
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
|
||||
; CHECK: G_STORE [[UV5]](p3), [[PTR_ADD7]](p5) :: (store (s32) into %stack.0 + 20, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV5]](p3), [[PTR_ADD7]](p5) :: (store (p3) into %stack.0 + 20, basealign 256, addrspace 5)
|
||||
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
|
||||
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
|
||||
; CHECK: G_STORE [[UV6]](p3), [[PTR_ADD8]](p5) :: (store (s32) into %stack.0 + 24, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV6]](p3), [[PTR_ADD8]](p5) :: (store (p3) into %stack.0 + 24, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
|
||||
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
|
||||
; CHECK: G_STORE [[UV7]](p3), [[PTR_ADD9]](p5) :: (store (s32) into %stack.0 + 28, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV7]](p3), [[PTR_ADD9]](p5) :: (store (p3) into %stack.0 + 28, basealign 256, addrspace 5)
|
||||
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
|
||||
; CHECK: G_STORE [[UV8]](p3), [[PTR_ADD10]](p5) :: (store (s32) into %stack.0 + 32, align 32, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV8]](p3), [[PTR_ADD10]](p5) :: (store (p3) into %stack.0 + 32, align 32, basealign 256, addrspace 5)
|
||||
; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
|
||||
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
|
||||
; CHECK: G_STORE [[UV9]](p3), [[PTR_ADD11]](p5) :: (store (s32) into %stack.0 + 36, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV9]](p3), [[PTR_ADD11]](p5) :: (store (p3) into %stack.0 + 36, basealign 256, addrspace 5)
|
||||
; CHECK: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
|
||||
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
|
||||
; CHECK: G_STORE [[UV10]](p3), [[PTR_ADD12]](p5) :: (store (s32) into %stack.0 + 40, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV10]](p3), [[PTR_ADD12]](p5) :: (store (p3) into %stack.0 + 40, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
|
||||
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
|
||||
; CHECK: G_STORE [[UV11]](p3), [[PTR_ADD13]](p5) :: (store (s32) into %stack.0 + 44, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV11]](p3), [[PTR_ADD13]](p5) :: (store (p3) into %stack.0 + 44, basealign 256, addrspace 5)
|
||||
; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
|
||||
; CHECK: G_STORE [[UV12]](p3), [[PTR_ADD14]](p5) :: (store (s32) into %stack.0 + 48, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV12]](p3), [[PTR_ADD14]](p5) :: (store (p3) into %stack.0 + 48, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
|
||||
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
|
||||
; CHECK: G_STORE [[UV13]](p3), [[PTR_ADD15]](p5) :: (store (s32) into %stack.0 + 52, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV13]](p3), [[PTR_ADD15]](p5) :: (store (p3) into %stack.0 + 52, basealign 256, addrspace 5)
|
||||
; CHECK: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
|
||||
; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
|
||||
; CHECK: G_STORE [[UV14]](p3), [[PTR_ADD16]](p5) :: (store (s32) into %stack.0 + 56, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV14]](p3), [[PTR_ADD16]](p5) :: (store (p3) into %stack.0 + 56, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
|
||||
; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
|
||||
; CHECK: G_STORE [[UV15]](p3), [[PTR_ADD17]](p5) :: (store (s32) into %stack.0 + 60, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV15]](p3), [[PTR_ADD17]](p5) :: (store (p3) into %stack.0 + 60, basealign 256, addrspace 5)
|
||||
; CHECK: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
|
||||
; CHECK: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
|
||||
; CHECK: G_STORE [[UV16]](p3), [[PTR_ADD18]](p5) :: (store (s32) into %stack.0 + 64, align 64, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV16]](p3), [[PTR_ADD18]](p5) :: (store (p3) into %stack.0 + 64, align 64, basealign 256, addrspace 5)
|
||||
; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 68
|
||||
; CHECK: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
|
||||
; CHECK: G_STORE [[UV17]](p3), [[PTR_ADD19]](p5) :: (store (s32) into %stack.0 + 68, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV17]](p3), [[PTR_ADD19]](p5) :: (store (p3) into %stack.0 + 68, basealign 256, addrspace 5)
|
||||
; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 72
|
||||
; CHECK: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
|
||||
; CHECK: G_STORE [[UV18]](p3), [[PTR_ADD20]](p5) :: (store (s32) into %stack.0 + 72, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV18]](p3), [[PTR_ADD20]](p5) :: (store (p3) into %stack.0 + 72, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
|
||||
; CHECK: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
|
||||
; CHECK: G_STORE [[UV19]](p3), [[PTR_ADD21]](p5) :: (store (s32) into %stack.0 + 76, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV19]](p3), [[PTR_ADD21]](p5) :: (store (p3) into %stack.0 + 76, basealign 256, addrspace 5)
|
||||
; CHECK: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 80
|
||||
; CHECK: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
|
||||
; CHECK: G_STORE [[UV20]](p3), [[PTR_ADD22]](p5) :: (store (s32) into %stack.0 + 80, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV20]](p3), [[PTR_ADD22]](p5) :: (store (p3) into %stack.0 + 80, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 84
|
||||
; CHECK: [[PTR_ADD23:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
|
||||
; CHECK: G_STORE [[UV21]](p3), [[PTR_ADD23]](p5) :: (store (s32) into %stack.0 + 84, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV21]](p3), [[PTR_ADD23]](p5) :: (store (p3) into %stack.0 + 84, basealign 256, addrspace 5)
|
||||
; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 88
|
||||
; CHECK: [[PTR_ADD24:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
|
||||
; CHECK: G_STORE [[UV22]](p3), [[PTR_ADD24]](p5) :: (store (s32) into %stack.0 + 88, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV22]](p3), [[PTR_ADD24]](p5) :: (store (p3) into %stack.0 + 88, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 92
|
||||
; CHECK: [[PTR_ADD25:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
|
||||
; CHECK: G_STORE [[UV23]](p3), [[PTR_ADD25]](p5) :: (store (s32) into %stack.0 + 92, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV23]](p3), [[PTR_ADD25]](p5) :: (store (p3) into %stack.0 + 92, basealign 256, addrspace 5)
|
||||
; CHECK: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 96
|
||||
; CHECK: [[PTR_ADD26:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
|
||||
; CHECK: G_STORE [[UV24]](p3), [[PTR_ADD26]](p5) :: (store (s32) into %stack.0 + 96, align 32, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV24]](p3), [[PTR_ADD26]](p5) :: (store (p3) into %stack.0 + 96, align 32, basealign 256, addrspace 5)
|
||||
; CHECK: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
|
||||
; CHECK: [[PTR_ADD27:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
|
||||
; CHECK: G_STORE [[UV25]](p3), [[PTR_ADD27]](p5) :: (store (s32) into %stack.0 + 100, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV25]](p3), [[PTR_ADD27]](p5) :: (store (p3) into %stack.0 + 100, basealign 256, addrspace 5)
|
||||
; CHECK: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
|
||||
; CHECK: [[PTR_ADD28:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
|
||||
; CHECK: G_STORE [[UV26]](p3), [[PTR_ADD28]](p5) :: (store (s32) into %stack.0 + 104, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV26]](p3), [[PTR_ADD28]](p5) :: (store (p3) into %stack.0 + 104, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 108
|
||||
; CHECK: [[PTR_ADD29:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
|
||||
; CHECK: G_STORE [[UV27]](p3), [[PTR_ADD29]](p5) :: (store (s32) into %stack.0 + 108, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV27]](p3), [[PTR_ADD29]](p5) :: (store (p3) into %stack.0 + 108, basealign 256, addrspace 5)
|
||||
; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 112
|
||||
; CHECK: [[PTR_ADD30:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
|
||||
; CHECK: G_STORE [[UV28]](p3), [[PTR_ADD30]](p5) :: (store (s32) into %stack.0 + 112, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV28]](p3), [[PTR_ADD30]](p5) :: (store (p3) into %stack.0 + 112, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 116
|
||||
; CHECK: [[PTR_ADD31:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
|
||||
; CHECK: G_STORE [[UV29]](p3), [[PTR_ADD31]](p5) :: (store (s32) into %stack.0 + 116, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV29]](p3), [[PTR_ADD31]](p5) :: (store (p3) into %stack.0 + 116, basealign 256, addrspace 5)
|
||||
; CHECK: [[C32:%[0-9]+]]:_(s32) = G_CONSTANT i32 120
|
||||
; CHECK: [[PTR_ADD32:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
|
||||
; CHECK: G_STORE [[UV30]](p3), [[PTR_ADD32]](p5) :: (store (s32) into %stack.0 + 120, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV30]](p3), [[PTR_ADD32]](p5) :: (store (p3) into %stack.0 + 120, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C33:%[0-9]+]]:_(s32) = G_CONSTANT i32 124
|
||||
; CHECK: [[PTR_ADD33:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
|
||||
; CHECK: G_STORE [[UV31]](p3), [[PTR_ADD33]](p5) :: (store (s32) into %stack.0 + 124, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV31]](p3), [[PTR_ADD33]](p5) :: (store (p3) into %stack.0 + 124, basealign 256, addrspace 5)
|
||||
; CHECK: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
|
||||
; CHECK: [[PTR_ADD34:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
|
||||
; CHECK: G_STORE [[UV32]](p3), [[PTR_ADD34]](p5) :: (store (s32) into %stack.0 + 128, align 128, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV32]](p3), [[PTR_ADD34]](p5) :: (store (p3) into %stack.0 + 128, align 128, basealign 256, addrspace 5)
|
||||
; CHECK: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 132
|
||||
; CHECK: [[PTR_ADD35:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
|
||||
; CHECK: [[COPY1:%[0-9]+]]:_(p5) = COPY [[PTR_ADD35]](p5)
|
||||
; CHECK: G_STORE [[UV33]](p3), [[COPY1]](p5) :: (store (s32) into %stack.0 + 132, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV33]](p3), [[COPY1]](p5) :: (store (p3) into %stack.0 + 132, basealign 256, addrspace 5)
|
||||
; CHECK: [[C36:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
|
||||
; CHECK: [[PTR_ADD36:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
|
||||
; CHECK: G_STORE [[UV34]](p3), [[PTR_ADD36]](p5) :: (store (s32) into %stack.0 + 136, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV34]](p3), [[PTR_ADD36]](p5) :: (store (p3) into %stack.0 + 136, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C37:%[0-9]+]]:_(s32) = G_CONSTANT i32 140
|
||||
; CHECK: [[PTR_ADD37:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
|
||||
; CHECK: G_STORE [[UV35]](p3), [[PTR_ADD37]](p5) :: (store (s32) into %stack.0 + 140, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV35]](p3), [[PTR_ADD37]](p5) :: (store (p3) into %stack.0 + 140, basealign 256, addrspace 5)
|
||||
; CHECK: [[C38:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
|
||||
; CHECK: [[PTR_ADD38:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
|
||||
; CHECK: G_STORE [[UV36]](p3), [[PTR_ADD38]](p5) :: (store (s32) into %stack.0 + 144, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV36]](p3), [[PTR_ADD38]](p5) :: (store (p3) into %stack.0 + 144, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: [[C39:%[0-9]+]]:_(s32) = G_CONSTANT i32 148
|
||||
; CHECK: [[PTR_ADD39:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
|
||||
; CHECK: G_STORE [[UV37]](p3), [[PTR_ADD39]](p5) :: (store (s32) into %stack.0 + 148, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV37]](p3), [[PTR_ADD39]](p5) :: (store (p3) into %stack.0 + 148, basealign 256, addrspace 5)
|
||||
; CHECK: [[C40:%[0-9]+]]:_(s32) = G_CONSTANT i32 152
|
||||
; CHECK: [[PTR_ADD40:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
|
||||
; CHECK: G_STORE [[UV38]](p3), [[PTR_ADD40]](p5) :: (store (s32) into %stack.0 + 152, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV38]](p3), [[PTR_ADD40]](p5) :: (store (p3) into %stack.0 + 152, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C41:%[0-9]+]]:_(s32) = G_CONSTANT i32 156
|
||||
; CHECK: [[PTR_ADD41:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
|
||||
; CHECK: G_STORE [[UV39]](p3), [[PTR_ADD41]](p5) :: (store (s32) into %stack.0 + 156, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV39]](p3), [[PTR_ADD41]](p5) :: (store (p3) into %stack.0 + 156, basealign 256, addrspace 5)
|
||||
; CHECK: [[C42:%[0-9]+]]:_(s32) = G_CONSTANT i32 160
|
||||
; CHECK: [[PTR_ADD42:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
|
||||
; CHECK: G_STORE [[UV40]](p3), [[PTR_ADD42]](p5) :: (store (s32) into %stack.0 + 160, align 32, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV40]](p3), [[PTR_ADD42]](p5) :: (store (p3) into %stack.0 + 160, align 32, basealign 256, addrspace 5)
|
||||
; CHECK: [[C43:%[0-9]+]]:_(s32) = G_CONSTANT i32 164
|
||||
; CHECK: [[PTR_ADD43:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
|
||||
; CHECK: G_STORE [[UV41]](p3), [[PTR_ADD43]](p5) :: (store (s32) into %stack.0 + 164, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV41]](p3), [[PTR_ADD43]](p5) :: (store (p3) into %stack.0 + 164, basealign 256, addrspace 5)
|
||||
; CHECK: [[C44:%[0-9]+]]:_(s32) = G_CONSTANT i32 168
|
||||
; CHECK: [[PTR_ADD44:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
|
||||
; CHECK: G_STORE [[UV42]](p3), [[PTR_ADD44]](p5) :: (store (s32) into %stack.0 + 168, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV42]](p3), [[PTR_ADD44]](p5) :: (store (p3) into %stack.0 + 168, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C45:%[0-9]+]]:_(s32) = G_CONSTANT i32 172
|
||||
; CHECK: [[PTR_ADD45:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
|
||||
; CHECK: G_STORE [[UV43]](p3), [[PTR_ADD45]](p5) :: (store (s32) into %stack.0 + 172, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV43]](p3), [[PTR_ADD45]](p5) :: (store (p3) into %stack.0 + 172, basealign 256, addrspace 5)
|
||||
; CHECK: [[C46:%[0-9]+]]:_(s32) = G_CONSTANT i32 176
|
||||
; CHECK: [[PTR_ADD46:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
|
||||
; CHECK: G_STORE [[UV44]](p3), [[PTR_ADD46]](p5) :: (store (s32) into %stack.0 + 176, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV44]](p3), [[PTR_ADD46]](p5) :: (store (p3) into %stack.0 + 176, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: [[C47:%[0-9]+]]:_(s32) = G_CONSTANT i32 180
|
||||
; CHECK: [[PTR_ADD47:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
|
||||
; CHECK: G_STORE [[UV45]](p3), [[PTR_ADD47]](p5) :: (store (s32) into %stack.0 + 180, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV45]](p3), [[PTR_ADD47]](p5) :: (store (p3) into %stack.0 + 180, basealign 256, addrspace 5)
|
||||
; CHECK: [[C48:%[0-9]+]]:_(s32) = G_CONSTANT i32 184
|
||||
; CHECK: [[PTR_ADD48:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
|
||||
; CHECK: G_STORE [[UV46]](p3), [[PTR_ADD48]](p5) :: (store (s32) into %stack.0 + 184, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV46]](p3), [[PTR_ADD48]](p5) :: (store (p3) into %stack.0 + 184, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C49:%[0-9]+]]:_(s32) = G_CONSTANT i32 188
|
||||
; CHECK: [[PTR_ADD49:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
|
||||
; CHECK: G_STORE [[UV47]](p3), [[PTR_ADD49]](p5) :: (store (s32) into %stack.0 + 188, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV47]](p3), [[PTR_ADD49]](p5) :: (store (p3) into %stack.0 + 188, basealign 256, addrspace 5)
|
||||
; CHECK: [[C50:%[0-9]+]]:_(s32) = G_CONSTANT i32 192
|
||||
; CHECK: [[PTR_ADD50:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
|
||||
; CHECK: G_STORE [[UV48]](p3), [[PTR_ADD50]](p5) :: (store (s32) into %stack.0 + 192, align 64, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV48]](p3), [[PTR_ADD50]](p5) :: (store (p3) into %stack.0 + 192, align 64, basealign 256, addrspace 5)
|
||||
; CHECK: [[C51:%[0-9]+]]:_(s32) = G_CONSTANT i32 196
|
||||
; CHECK: [[PTR_ADD51:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
|
||||
; CHECK: G_STORE [[UV49]](p3), [[PTR_ADD51]](p5) :: (store (s32) into %stack.0 + 196, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV49]](p3), [[PTR_ADD51]](p5) :: (store (p3) into %stack.0 + 196, basealign 256, addrspace 5)
|
||||
; CHECK: [[C52:%[0-9]+]]:_(s32) = G_CONSTANT i32 200
|
||||
; CHECK: [[PTR_ADD52:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
|
||||
; CHECK: G_STORE [[UV50]](p3), [[PTR_ADD52]](p5) :: (store (s32) into %stack.0 + 200, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV50]](p3), [[PTR_ADD52]](p5) :: (store (p3) into %stack.0 + 200, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C53:%[0-9]+]]:_(s32) = G_CONSTANT i32 204
|
||||
; CHECK: [[PTR_ADD53:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
|
||||
; CHECK: G_STORE [[UV51]](p3), [[PTR_ADD53]](p5) :: (store (s32) into %stack.0 + 204, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV51]](p3), [[PTR_ADD53]](p5) :: (store (p3) into %stack.0 + 204, basealign 256, addrspace 5)
|
||||
; CHECK: [[C54:%[0-9]+]]:_(s32) = G_CONSTANT i32 208
|
||||
; CHECK: [[PTR_ADD54:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
|
||||
; CHECK: G_STORE [[UV52]](p3), [[PTR_ADD54]](p5) :: (store (s32) into %stack.0 + 208, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV52]](p3), [[PTR_ADD54]](p5) :: (store (p3) into %stack.0 + 208, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: [[C55:%[0-9]+]]:_(s32) = G_CONSTANT i32 212
|
||||
; CHECK: [[PTR_ADD55:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
|
||||
; CHECK: G_STORE [[UV53]](p3), [[PTR_ADD55]](p5) :: (store (s32) into %stack.0 + 212, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV53]](p3), [[PTR_ADD55]](p5) :: (store (p3) into %stack.0 + 212, basealign 256, addrspace 5)
|
||||
; CHECK: [[C56:%[0-9]+]]:_(s32) = G_CONSTANT i32 216
|
||||
; CHECK: [[PTR_ADD56:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
|
||||
; CHECK: G_STORE [[UV54]](p3), [[PTR_ADD56]](p5) :: (store (s32) into %stack.0 + 216, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV54]](p3), [[PTR_ADD56]](p5) :: (store (p3) into %stack.0 + 216, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C57:%[0-9]+]]:_(s32) = G_CONSTANT i32 220
|
||||
; CHECK: [[PTR_ADD57:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
|
||||
; CHECK: G_STORE [[UV55]](p3), [[PTR_ADD57]](p5) :: (store (s32) into %stack.0 + 220, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV55]](p3), [[PTR_ADD57]](p5) :: (store (p3) into %stack.0 + 220, basealign 256, addrspace 5)
|
||||
; CHECK: [[C58:%[0-9]+]]:_(s32) = G_CONSTANT i32 224
|
||||
; CHECK: [[PTR_ADD58:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
|
||||
; CHECK: G_STORE [[UV56]](p3), [[PTR_ADD58]](p5) :: (store (s32) into %stack.0 + 224, align 32, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV56]](p3), [[PTR_ADD58]](p5) :: (store (p3) into %stack.0 + 224, align 32, basealign 256, addrspace 5)
|
||||
; CHECK: [[C59:%[0-9]+]]:_(s32) = G_CONSTANT i32 228
|
||||
; CHECK: [[PTR_ADD59:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
|
||||
; CHECK: G_STORE [[UV57]](p3), [[PTR_ADD59]](p5) :: (store (s32) into %stack.0 + 228, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV57]](p3), [[PTR_ADD59]](p5) :: (store (p3) into %stack.0 + 228, basealign 256, addrspace 5)
|
||||
; CHECK: [[C60:%[0-9]+]]:_(s32) = G_CONSTANT i32 232
|
||||
; CHECK: [[PTR_ADD60:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
|
||||
; CHECK: G_STORE [[UV58]](p3), [[PTR_ADD60]](p5) :: (store (s32) into %stack.0 + 232, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV58]](p3), [[PTR_ADD60]](p5) :: (store (p3) into %stack.0 + 232, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C61:%[0-9]+]]:_(s32) = G_CONSTANT i32 236
|
||||
; CHECK: [[PTR_ADD61:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
|
||||
; CHECK: G_STORE [[UV59]](p3), [[PTR_ADD61]](p5) :: (store (s32) into %stack.0 + 236, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV59]](p3), [[PTR_ADD61]](p5) :: (store (p3) into %stack.0 + 236, basealign 256, addrspace 5)
|
||||
; CHECK: [[C62:%[0-9]+]]:_(s32) = G_CONSTANT i32 240
|
||||
; CHECK: [[PTR_ADD62:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
|
||||
; CHECK: G_STORE [[UV60]](p3), [[PTR_ADD62]](p5) :: (store (s32) into %stack.0 + 240, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV60]](p3), [[PTR_ADD62]](p5) :: (store (p3) into %stack.0 + 240, align 16, basealign 256, addrspace 5)
|
||||
; CHECK: [[C63:%[0-9]+]]:_(s32) = G_CONSTANT i32 244
|
||||
; CHECK: [[PTR_ADD63:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
|
||||
; CHECK: G_STORE [[UV61]](p3), [[PTR_ADD63]](p5) :: (store (s32) into %stack.0 + 244, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV61]](p3), [[PTR_ADD63]](p5) :: (store (p3) into %stack.0 + 244, basealign 256, addrspace 5)
|
||||
; CHECK: [[C64:%[0-9]+]]:_(s32) = G_CONSTANT i32 248
|
||||
; CHECK: [[PTR_ADD64:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
|
||||
; CHECK: G_STORE [[UV62]](p3), [[PTR_ADD64]](p5) :: (store (s32) into %stack.0 + 248, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV62]](p3), [[PTR_ADD64]](p5) :: (store (p3) into %stack.0 + 248, align 8, basealign 256, addrspace 5)
|
||||
; CHECK: [[C65:%[0-9]+]]:_(s32) = G_CONSTANT i32 252
|
||||
; CHECK: [[PTR_ADD65:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
|
||||
; CHECK: G_STORE [[UV63]](p3), [[PTR_ADD65]](p5) :: (store (s32) into %stack.0 + 252, basealign 256, addrspace 5)
|
||||
; CHECK: G_STORE [[UV63]](p3), [[PTR_ADD65]](p5) :: (store (p3) into %stack.0 + 252, basealign 256, addrspace 5)
|
||||
; CHECK: [[LOAD4:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD35]](p5) :: (load (p3) from %stack.0 + 132, addrspace 5)
|
||||
; CHECK: S_ENDPGM 0, implicit [[LOAD4]](p3)
|
||||
%0:_(p1) = COPY $sgpr0_sgpr1
|
||||
@@ -1705,16 +1705,16 @@ body: |
; CHECK-LABEL: name: extract_vector_elt_varidx_v64s32
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
-; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (s512), align 4, addrspace 4)
+; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x s32>), align 4, addrspace 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
-; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s512) from unknown-address + 64, align 4, addrspace 4)
+; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 64, align 4, addrspace 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
-; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s512) from unknown-address + 128, align 4, addrspace 4)
+; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
-; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s512) from unknown-address + 192, align 4, addrspace 4)
+; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x s32>) from unknown-address + 192, align 4, addrspace 4)
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<16 x s32>)
; CHECK: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
|
@ -197,87 +197,87 @@ body: |
|
||||
; CHECK: [[UV4:%[0-9]+]]:_(<4 x s32>), [[UV5:%[0-9]+]]:_(<4 x s32>), [[UV6:%[0-9]+]]:_(<4 x s32>), [[UV7:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
|
||||
; CHECK: [[UV8:%[0-9]+]]:_(<4 x s32>), [[UV9:%[0-9]+]]:_(<4 x s32>), [[UV10:%[0-9]+]]:_(<4 x s32>), [[UV11:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
|
||||
; CHECK: [[UV12:%[0-9]+]]:_(<4 x s32>), [[UV13:%[0-9]+]]:_(<4 x s32>), [[UV14:%[0-9]+]]:_(<4 x s32>), [[UV15:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
|
||||
; CHECK: G_STORE [[UV]](<4 x s32>), [[COPY1]](p1) :: (store (s128), align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV]](<4 x s32>), [[COPY1]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
|
||||
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
|
||||
; CHECK: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (s128) into unknown-address + 16, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
|
||||
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C1]](s64)
|
||||
; CHECK: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD1]](p1) :: (store (s128) into unknown-address + 32, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD1]](p1) :: (store (<4 x s32>) into unknown-address + 32, align 4, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD2]](p1) :: (store (s128) into unknown-address + 48, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD2]](p1) :: (store (<4 x s32>) into unknown-address + 48, align 4, addrspace 1)
|
||||
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
|
||||
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C3]](s64)
|
||||
; CHECK: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD3]](p1) :: (store (s128) into unknown-address + 64, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD3]](p1) :: (store (<4 x s32>) into unknown-address + 64, align 4, addrspace 1)
|
||||
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
|
||||
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C4]](s64)
|
||||
; CHECK: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD4]](p1) :: (store (s128) into unknown-address + 80, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD4]](p1) :: (store (<4 x s32>) into unknown-address + 80, align 4, addrspace 1)
|
||||
; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
|
||||
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C5]](s64)
|
||||
; CHECK: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD5]](p1) :: (store (s128) into unknown-address + 96, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD5]](p1) :: (store (<4 x s32>) into unknown-address + 96, align 4, addrspace 1)
|
||||
; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
|
||||
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C6]](s64)
|
||||
; CHECK: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD6]](p1) :: (store (s128) into unknown-address + 112, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD6]](p1) :: (store (<4 x s32>) into unknown-address + 112, align 4, addrspace 1)
|
||||
; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
|
||||
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C7]](s64)
|
||||
; CHECK: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD7]](p1) :: (store (s128) into unknown-address + 128, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD7]](p1) :: (store (<4 x s32>) into unknown-address + 128, align 4, addrspace 1)
|
||||
; CHECK: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
|
||||
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C8]](s64)
|
||||
; CHECK: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD8]](p1) :: (store (s128) into unknown-address + 144, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD8]](p1) :: (store (<4 x s32>) into unknown-address + 144, align 4, addrspace 1)
|
||||
; CHECK: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 160
|
||||
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C9]](s64)
|
||||
; CHECK: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD9]](p1) :: (store (s128) into unknown-address + 160, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD9]](p1) :: (store (<4 x s32>) into unknown-address + 160, align 4, addrspace 1)
|
||||
; CHECK: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 176
|
||||
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C10]](s64)
|
||||
; CHECK: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD10]](p1) :: (store (s128) into unknown-address + 176, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD10]](p1) :: (store (<4 x s32>) into unknown-address + 176, align 4, addrspace 1)
|
||||
; CHECK: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
|
||||
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C11]](s64)
|
||||
; CHECK: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD11]](p1) :: (store (s128) into unknown-address + 192, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD11]](p1) :: (store (<4 x s32>) into unknown-address + 192, align 4, addrspace 1)
|
||||
; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 208
|
||||
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C12]](s64)
|
||||
; CHECK: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD12]](p1) :: (store (s128) into unknown-address + 208, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD12]](p1) :: (store (<4 x s32>) into unknown-address + 208, align 4, addrspace 1)
|
||||
; CHECK: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 224
|
||||
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C13]](s64)
|
||||
; CHECK: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD13]](p1) :: (store (s128) into unknown-address + 224, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD13]](p1) :: (store (<4 x s32>) into unknown-address + 224, align 4, addrspace 1)
|
||||
; CHECK: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 240
|
||||
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C14]](s64)
|
||||
; CHECK: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD14]](p1) :: (store (s128) into unknown-address + 240, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD14]](p1) :: (store (<4 x s32>) into unknown-address + 240, align 4, addrspace 1)
|
||||
; CHECK: [[UV16:%[0-9]+]]:_(<4 x s32>), [[UV17:%[0-9]+]]:_(<4 x s32>), [[UV18:%[0-9]+]]:_(<4 x s32>), [[UV19:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
|
||||
; CHECK: [[UV20:%[0-9]+]]:_(<4 x s32>), [[UV21:%[0-9]+]]:_(<4 x s32>), [[UV22:%[0-9]+]]:_(<4 x s32>), [[UV23:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
|
||||
; CHECK: [[UV24:%[0-9]+]]:_(<4 x s32>), [[UV25:%[0-9]+]]:_(<4 x s32>), [[UV26:%[0-9]+]]:_(<4 x s32>), [[UV27:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
|
||||
; CHECK: [[UV28:%[0-9]+]]:_(<4 x s32>), [[UV29:%[0-9]+]]:_(<4 x s32>), [[UV30:%[0-9]+]]:_(<4 x s32>), [[UV31:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
|
||||
; CHECK: G_STORE [[UV16]](<4 x s32>), [[COPY2]](p1) :: (store (s128), align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV16]](<4 x s32>), [[COPY2]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C]](s64)
|
||||
; CHECK: G_STORE [[UV17]](<4 x s32>), [[PTR_ADD15]](p1) :: (store (s128) into unknown-address + 16, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV17]](<4 x s32>), [[PTR_ADD15]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C1]](s64)
|
||||
; CHECK: G_STORE [[UV18]](<4 x s32>), [[PTR_ADD16]](p1) :: (store (s128) into unknown-address + 32, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV18]](<4 x s32>), [[PTR_ADD16]](p1) :: (store (<4 x s32>) into unknown-address + 32, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV19]](<4 x s32>), [[PTR_ADD17]](p1) :: (store (s128) into unknown-address + 48, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV19]](<4 x s32>), [[PTR_ADD17]](p1) :: (store (<4 x s32>) into unknown-address + 48, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD18:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C3]](s64)
|
||||
; CHECK: G_STORE [[UV20]](<4 x s32>), [[PTR_ADD18]](p1) :: (store (s128) into unknown-address + 64, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV20]](<4 x s32>), [[PTR_ADD18]](p1) :: (store (<4 x s32>) into unknown-address + 64, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD19:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C4]](s64)
|
||||
; CHECK: G_STORE [[UV21]](<4 x s32>), [[PTR_ADD19]](p1) :: (store (s128) into unknown-address + 80, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV21]](<4 x s32>), [[PTR_ADD19]](p1) :: (store (<4 x s32>) into unknown-address + 80, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD20:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C5]](s64)
|
||||
; CHECK: G_STORE [[UV22]](<4 x s32>), [[PTR_ADD20]](p1) :: (store (s128) into unknown-address + 96, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV22]](<4 x s32>), [[PTR_ADD20]](p1) :: (store (<4 x s32>) into unknown-address + 96, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD21:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C6]](s64)
|
||||
; CHECK: G_STORE [[UV23]](<4 x s32>), [[PTR_ADD21]](p1) :: (store (s128) into unknown-address + 112, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV23]](<4 x s32>), [[PTR_ADD21]](p1) :: (store (<4 x s32>) into unknown-address + 112, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD22:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C7]](s64)
|
||||
; CHECK: G_STORE [[UV24]](<4 x s32>), [[PTR_ADD22]](p1) :: (store (s128) into unknown-address + 128, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV24]](<4 x s32>), [[PTR_ADD22]](p1) :: (store (<4 x s32>) into unknown-address + 128, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD23:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C8]](s64)
|
||||
; CHECK: G_STORE [[UV25]](<4 x s32>), [[PTR_ADD23]](p1) :: (store (s128) into unknown-address + 144, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV25]](<4 x s32>), [[PTR_ADD23]](p1) :: (store (<4 x s32>) into unknown-address + 144, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD24:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C9]](s64)
|
||||
; CHECK: G_STORE [[UV26]](<4 x s32>), [[PTR_ADD24]](p1) :: (store (s128) into unknown-address + 160, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV26]](<4 x s32>), [[PTR_ADD24]](p1) :: (store (<4 x s32>) into unknown-address + 160, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD25:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C10]](s64)
|
||||
; CHECK: G_STORE [[UV27]](<4 x s32>), [[PTR_ADD25]](p1) :: (store (s128) into unknown-address + 176, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV27]](<4 x s32>), [[PTR_ADD25]](p1) :: (store (<4 x s32>) into unknown-address + 176, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD26:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C11]](s64)
|
||||
; CHECK: G_STORE [[UV28]](<4 x s32>), [[PTR_ADD26]](p1) :: (store (s128) into unknown-address + 192, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV28]](<4 x s32>), [[PTR_ADD26]](p1) :: (store (<4 x s32>) into unknown-address + 192, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD27:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C12]](s64)
|
||||
; CHECK: G_STORE [[UV29]](<4 x s32>), [[PTR_ADD27]](p1) :: (store (s128) into unknown-address + 208, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV29]](<4 x s32>), [[PTR_ADD27]](p1) :: (store (<4 x s32>) into unknown-address + 208, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD28:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C13]](s64)
|
||||
; CHECK: G_STORE [[UV30]](<4 x s32>), [[PTR_ADD28]](p1) :: (store (s128) into unknown-address + 224, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV30]](<4 x s32>), [[PTR_ADD28]](p1) :: (store (<4 x s32>) into unknown-address + 224, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD29:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY2]], [[C14]](s64)
|
||||
; CHECK: G_STORE [[UV31]](<4 x s32>), [[PTR_ADD29]](p1) :: (store (s128) into unknown-address + 240, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV31]](<4 x s32>), [[PTR_ADD29]](p1) :: (store (<4 x s32>) into unknown-address + 240, align 4, addrspace 1)
|
||||
%0:_(p1) = COPY $sgpr0_sgpr1
|
||||
%1:_(s32) = G_CONSTANT i32 64
|
||||
%2:_(<64 x s32>) = G_LOAD %0 :: (load (<64 x s32>), align 4, addrspace 4)
|
||||
@ -300,16 +300,16 @@ body: |
|
||||
|
||||
; CHECK-LABEL: name: insert_vector_elt_33_v64s32
|
||||
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (s512), align 4, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x s32>), align 4, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s512) from unknown-address + 64, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 64, align 4, addrspace 4)
|
||||
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s512) from unknown-address + 128, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
|
||||
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s512) from unknown-address + 192, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x s32>) from unknown-address + 192, align 4, addrspace 4)
|
||||
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12345
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s32>) = G_CONCAT_VECTORS [[LOAD2]](<16 x s32>), [[LOAD3]](<16 x s32>)
|
||||
; CHECK: [[INSERT:%[0-9]+]]:_(<32 x s32>) = G_INSERT [[CONCAT_VECTORS]], [[C3]](s32), 32
|
||||
@ -317,49 +317,49 @@ body: |
|
||||
; CHECK: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>), [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[LOAD]](<16 x s32>)
|
||||
; CHECK: [[UV4:%[0-9]+]]:_(<4 x s32>), [[UV5:%[0-9]+]]:_(<4 x s32>), [[UV6:%[0-9]+]]:_(<4 x s32>), [[UV7:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[LOAD1]](<16 x s32>)
|
||||
; CHECK: [[UV8:%[0-9]+]]:_(<4 x s32>), [[UV9:%[0-9]+]]:_(<4 x s32>), [[UV10:%[0-9]+]]:_(<4 x s32>), [[UV11:%[0-9]+]]:_(<4 x s32>), [[UV12:%[0-9]+]]:_(<4 x s32>), [[UV13:%[0-9]+]]:_(<4 x s32>), [[UV14:%[0-9]+]]:_(<4 x s32>), [[UV15:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[INSERT]](<32 x s32>)
|
||||
; CHECK: G_STORE [[UV]](<4 x s32>), [[COPY1]](p1) :: (store (s128), align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV]](<4 x s32>), [[COPY1]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
|
||||
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C4]](s64)
|
||||
; CHECK: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD3]](p1) :: (store (s128) into unknown-address + 16, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV1]](<4 x s32>), [[PTR_ADD3]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
|
||||
; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C5]](s64)
|
||||
; CHECK: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD4]](p1) :: (store (s128) into unknown-address + 32, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV2]](<4 x s32>), [[PTR_ADD4]](p1) :: (store (<4 x s32>) into unknown-address + 32, align 4, addrspace 1)
|
||||
; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C6]](s64)
|
||||
; CHECK: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD5]](p1) :: (store (s128) into unknown-address + 48, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV3]](<4 x s32>), [[PTR_ADD5]](p1) :: (store (<4 x s32>) into unknown-address + 48, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
|
||||
; CHECK: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD6]](p1) :: (store (s128) into unknown-address + 64, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV4]](<4 x s32>), [[PTR_ADD6]](p1) :: (store (<4 x s32>) into unknown-address + 64, align 4, addrspace 1)
|
||||
; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
|
||||
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C7]](s64)
|
||||
; CHECK: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD7]](p1) :: (store (s128) into unknown-address + 80, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV5]](<4 x s32>), [[PTR_ADD7]](p1) :: (store (<4 x s32>) into unknown-address + 80, align 4, addrspace 1)
|
||||
; CHECK: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
|
||||
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C8]](s64)
|
||||
; CHECK: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD8]](p1) :: (store (s128) into unknown-address + 96, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV6]](<4 x s32>), [[PTR_ADD8]](p1) :: (store (<4 x s32>) into unknown-address + 96, align 4, addrspace 1)
|
||||
; CHECK: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
|
||||
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C9]](s64)
|
||||
; CHECK: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD9]](p1) :: (store (s128) into unknown-address + 112, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV7]](<4 x s32>), [[PTR_ADD9]](p1) :: (store (<4 x s32>) into unknown-address + 112, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C1]](s64)
|
||||
; CHECK: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD10]](p1) :: (store (s128) into unknown-address + 128, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV8]](<4 x s32>), [[PTR_ADD10]](p1) :: (store (<4 x s32>) into unknown-address + 128, align 4, addrspace 1)
|
||||
; CHECK: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
|
||||
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C10]](s64)
|
||||
; CHECK: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD11]](p1) :: (store (s128) into unknown-address + 144, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV9]](<4 x s32>), [[PTR_ADD11]](p1) :: (store (<4 x s32>) into unknown-address + 144, align 4, addrspace 1)
|
||||
; CHECK: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 160
|
||||
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C11]](s64)
|
||||
; CHECK: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD12]](p1) :: (store (s128) into unknown-address + 160, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV10]](<4 x s32>), [[PTR_ADD12]](p1) :: (store (<4 x s32>) into unknown-address + 160, align 4, addrspace 1)
|
||||
; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 176
|
||||
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C12]](s64)
|
||||
; CHECK: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD13]](p1) :: (store (s128) into unknown-address + 176, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV11]](<4 x s32>), [[PTR_ADD13]](p1) :: (store (<4 x s32>) into unknown-address + 176, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD14]](p1) :: (store (s128) into unknown-address + 192, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV12]](<4 x s32>), [[PTR_ADD14]](p1) :: (store (<4 x s32>) into unknown-address + 192, align 4, addrspace 1)
|
||||
; CHECK: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 208
|
||||
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C13]](s64)
|
||||
; CHECK: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD15]](p1) :: (store (s128) into unknown-address + 208, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV13]](<4 x s32>), [[PTR_ADD15]](p1) :: (store (<4 x s32>) into unknown-address + 208, align 4, addrspace 1)
|
||||
; CHECK: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 224
|
||||
; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C14]](s64)
|
||||
; CHECK: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD16]](p1) :: (store (s128) into unknown-address + 224, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV14]](<4 x s32>), [[PTR_ADD16]](p1) :: (store (<4 x s32>) into unknown-address + 224, align 4, addrspace 1)
|
||||
; CHECK: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 240
|
||||
; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C15]](s64)
|
||||
; CHECK: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD17]](p1) :: (store (s128) into unknown-address + 240, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[UV15]](<4 x s32>), [[PTR_ADD17]](p1) :: (store (<4 x s32>) into unknown-address + 240, align 4, addrspace 1)
|
||||
%0:_(p1) = COPY $sgpr0_sgpr1
|
||||
%1:_(s32) = G_CONSTANT i32 33
|
||||
%2:_(<64 x s32>) = G_LOAD %0 :: (load (<64 x s32>), align 4, addrspace 4)
|
||||
@ -379,16 +379,16 @@ body: |
|
||||
; CHECK-LABEL: name: insert_vector_elt_varidx_v64s32
|
||||
; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
|
||||
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (s512), align 4, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[COPY]](p1) :: (load (<16 x s32>), align 4, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s512) from unknown-address + 64, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<16 x s32>) from unknown-address + 64, align 4, addrspace 4)
|
||||
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s512) from unknown-address + 128, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<16 x s32>) from unknown-address + 128, align 4, addrspace 4)
|
||||
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 192
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s512) from unknown-address + 192, align 4, addrspace 4)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<16 x s32>) from unknown-address + 192, align 4, addrspace 4)
|
||||
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12345
|
||||
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0
|
||||
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<16 x s32>)
|
||||
@ -734,49 +734,49 @@ body: |
|
||||
; CHECK: [[BUILD_VECTOR13:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD56]](s32), [[LOAD57]](s32), [[LOAD58]](s32), [[LOAD59]](s32)
|
||||
; CHECK: [[BUILD_VECTOR14:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD60]](s32), [[LOAD61]](s32), [[LOAD62]](s32), [[LOAD63]](s32)
|
||||
; CHECK: [[BUILD_VECTOR15:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD64]](s32), [[LOAD65]](s32), [[LOAD66]](s32), [[LOAD67]](s32)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY65]](p1) :: (store (s128), align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY65]](p1) :: (store (<4 x s32>), align 4, addrspace 1)
|
||||
; CHECK: [[C68:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD67:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C68]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD67]](p1) :: (store (s128) into unknown-address + 16, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD67]](p1) :: (store (<4 x s32>) into unknown-address + 16, align 4, addrspace 1)
|
||||
; CHECK: [[C69:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD68:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C69]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR2]](<4 x s32>), [[PTR_ADD68]](p1) :: (store (s128) into unknown-address + 32, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR2]](<4 x s32>), [[PTR_ADD68]](p1) :: (store (<4 x s32>) into unknown-address + 32, align 4, addrspace 1)
|
||||
; CHECK: [[C70:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD69:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C70]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR3]](<4 x s32>), [[PTR_ADD69]](p1) :: (store (s128) into unknown-address + 48, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR3]](<4 x s32>), [[PTR_ADD69]](p1) :: (store (<4 x s32>) into unknown-address + 48, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD70:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR4]](<4 x s32>), [[PTR_ADD70]](p1) :: (store (s128) into unknown-address + 64, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR4]](<4 x s32>), [[PTR_ADD70]](p1) :: (store (<4 x s32>) into unknown-address + 64, align 4, addrspace 1)
|
||||
; CHECK: [[C71:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
|
||||
; CHECK: [[PTR_ADD71:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C71]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR5]](<4 x s32>), [[PTR_ADD71]](p1) :: (store (s128) into unknown-address + 80, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR5]](<4 x s32>), [[PTR_ADD71]](p1) :: (store (<4 x s32>) into unknown-address + 80, align 4, addrspace 1)
|
||||
; CHECK: [[C72:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
|
||||
; CHECK: [[PTR_ADD72:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C72]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR6]](<4 x s32>), [[PTR_ADD72]](p1) :: (store (s128) into unknown-address + 96, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR6]](<4 x s32>), [[PTR_ADD72]](p1) :: (store (<4 x s32>) into unknown-address + 96, align 4, addrspace 1)
|
||||
; CHECK: [[C73:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
|
||||
; CHECK: [[PTR_ADD73:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C73]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR7]](<4 x s32>), [[PTR_ADD73]](p1) :: (store (s128) into unknown-address + 112, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR7]](<4 x s32>), [[PTR_ADD73]](p1) :: (store (<4 x s32>) into unknown-address + 112, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD74:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C1]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR8]](<4 x s32>), [[PTR_ADD74]](p1) :: (store (s128) into unknown-address + 128, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR8]](<4 x s32>), [[PTR_ADD74]](p1) :: (store (<4 x s32>) into unknown-address + 128, align 4, addrspace 1)
|
||||
; CHECK: [[C74:%[0-9]+]]:_(s64) = G_CONSTANT i64 144
|
||||
; CHECK: [[PTR_ADD75:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C74]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR9]](<4 x s32>), [[PTR_ADD75]](p1) :: (store (s128) into unknown-address + 144, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR9]](<4 x s32>), [[PTR_ADD75]](p1) :: (store (<4 x s32>) into unknown-address + 144, align 4, addrspace 1)
|
||||
; CHECK: [[C75:%[0-9]+]]:_(s64) = G_CONSTANT i64 160
|
||||
; CHECK: [[PTR_ADD76:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C75]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR10]](<4 x s32>), [[PTR_ADD76]](p1) :: (store (s128) into unknown-address + 160, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR10]](<4 x s32>), [[PTR_ADD76]](p1) :: (store (<4 x s32>) into unknown-address + 160, align 4, addrspace 1)
|
||||
; CHECK: [[C76:%[0-9]+]]:_(s64) = G_CONSTANT i64 176
|
||||
; CHECK: [[PTR_ADD77:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C76]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR11]](<4 x s32>), [[PTR_ADD77]](p1) :: (store (s128) into unknown-address + 176, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR11]](<4 x s32>), [[PTR_ADD77]](p1) :: (store (<4 x s32>) into unknown-address + 176, align 4, addrspace 1)
|
||||
; CHECK: [[PTR_ADD78:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR12]](<4 x s32>), [[PTR_ADD78]](p1) :: (store (s128) into unknown-address + 192, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR12]](<4 x s32>), [[PTR_ADD78]](p1) :: (store (<4 x s32>) into unknown-address + 192, align 4, addrspace 1)
|
||||
; CHECK: [[C77:%[0-9]+]]:_(s64) = G_CONSTANT i64 208
|
||||
; CHECK: [[PTR_ADD79:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C77]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR13]](<4 x s32>), [[PTR_ADD79]](p1) :: (store (s128) into unknown-address + 208, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR13]](<4 x s32>), [[PTR_ADD79]](p1) :: (store (<4 x s32>) into unknown-address + 208, align 4, addrspace 1)
|
||||
; CHECK: [[C78:%[0-9]+]]:_(s64) = G_CONSTANT i64 224
|
||||
; CHECK: [[PTR_ADD80:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C78]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR14]](<4 x s32>), [[PTR_ADD80]](p1) :: (store (s128) into unknown-address + 224, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR14]](<4 x s32>), [[PTR_ADD80]](p1) :: (store (<4 x s32>) into unknown-address + 224, align 4, addrspace 1)
|
||||
; CHECK: [[C79:%[0-9]+]]:_(s64) = G_CONSTANT i64 240
|
||||
; CHECK: [[PTR_ADD81:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY65]], [[C79]](s64)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR15]](<4 x s32>), [[PTR_ADD81]](p1) :: (store (s128) into unknown-address + 240, align 4, addrspace 1)
|
||||
; CHECK: G_STORE [[BUILD_VECTOR15]](<4 x s32>), [[PTR_ADD81]](p1) :: (store (<4 x s32>) into unknown-address + 240, align 4, addrspace 1)
|
||||
%0:_(p1) = COPY $sgpr0_sgpr1
|
||||
%1:_(s32) = COPY $sgpr2
|
||||
%2:_(<64 x s32>) = G_LOAD %0 :: (load (<64 x s32>), align 4, addrspace 4)
|
||||
|
@ -1539,7 +1539,7 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_constant_s160_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128), align 4, addrspace 4)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s32) from unknown-address + 16, addrspace 4)
|
||||
@ -1549,7 +1549,7 @@ body: |
|
||||
; CI: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; VI-LABEL: name: test_load_constant_s160_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128), align 4, addrspace 4)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s32) from unknown-address + 16, addrspace 4)
|
||||
@ -1559,7 +1559,7 @@ body: |
|
||||
; VI: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; GFX9-LABEL: name: test_load_constant_s160_align4
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128), align 4, addrspace 4)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p4) :: (load (s32) from unknown-address + 16, addrspace 4)
|
||||
@ -1580,10 +1580,10 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_constant_s224_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128), align 4, addrspace 4)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s96) from unknown-address + 16, align 4, addrspace 4)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 4)
|
||||
; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; CI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; CI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
@ -1599,10 +1599,10 @@ body: |
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; VI-LABEL: name: test_load_constant_s224_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128), align 4, addrspace 4)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s96) from unknown-address + 16, align 4, addrspace 4)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 4)
|
||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
@ -1618,10 +1618,10 @@ body: |
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; GFX9-LABEL: name: test_load_constant_s224_align4
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128), align 4, addrspace 4)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 4, addrspace 4)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s96) from unknown-address + 16, align 4, addrspace 4)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 4)
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
@ -5852,7 +5852,7 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_constant_v3s64_align8
|
||||
; CI: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (s128), align 8, addrspace 4)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (<2 x s64>), align 8, addrspace 4)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load (s64) from unknown-address + 16, addrspace 4)
|
||||
@ -5863,7 +5863,7 @@ body: |
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; VI-LABEL: name: test_load_constant_v3s64_align8
|
||||
; VI: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (s128), align 8, addrspace 4)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (<2 x s64>), align 8, addrspace 4)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load (s64) from unknown-address + 16, addrspace 4)
|
||||
@ -5874,7 +5874,7 @@ body: |
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; GFX9-LABEL: name: test_load_constant_v3s64_align8
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p4) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (s128), align 8, addrspace 4)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (<2 x s64>), align 8, addrspace 4)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p4) :: (load (s64) from unknown-address + 16, addrspace 4)
|
||||
|
@ -1261,7 +1261,7 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_s160_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 4)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
|
||||
@ -1271,7 +1271,7 @@ body: |
|
||||
; CI: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; VI-LABEL: name: test_load_flat_s160_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 4)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
|
||||
@ -1281,7 +1281,7 @@ body: |
|
||||
; VI: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; GFX9-LABEL: name: test_load_flat_s160_align4
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 4)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32) from unknown-address + 16)
|
||||
@ -1302,10 +1302,10 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_s224_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 4)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s96) from unknown-address + 16, align 4)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
|
||||
; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; CI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; CI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
@ -1321,10 +1321,10 @@ body: |
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; VI-LABEL: name: test_load_flat_s224_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 4)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s96) from unknown-address + 16, align 4)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
|
||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
@ -1340,10 +1340,10 @@ body: |
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; GFX9-LABEL: name: test_load_flat_s224_align4
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 4)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 4)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s96) from unknown-address + 16, align 4)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<3 x s32>) from unknown-address + 16, align 4)
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
|
||||
@ -1742,28 +1742,28 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_s256_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128))
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; CI: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](s256)
|
||||
; VI-LABEL: name: test_load_flat_s256_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128))
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; VI: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](s256)
|
||||
; GFX9-LABEL: name: test_load_flat_s256_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128))
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; GFX9: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](s256)
|
||||
@ -3312,10 +3312,10 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_v32s8_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; CI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; CI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s32>)
|
||||
; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
@ -3397,10 +3397,10 @@ body: |
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<32 x s8>)
|
||||
; VI-LABEL: name: test_load_flat_v32s8_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s32>)
|
||||
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
@ -3482,10 +3482,10 @@ body: |
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<32 x s8>)
|
||||
; GFX9-LABEL: name: test_load_flat_v32s8_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<4 x s32>)
|
||||
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
@ -4655,26 +4655,26 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_v8s32_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; VI-LABEL: name: test_load_flat_v8s32_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; GFX9-LABEL: name: test_load_flat_v8s32_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
%0:_(p0) = COPY $vgpr0_vgpr1
|
||||
@ -4690,44 +4690,44 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_v16s32_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; CI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
|
||||
; CI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; CI: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from unknown-address + 32, align 32)
|
||||
; CI: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
|
||||
; CI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
|
||||
; CI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; CI: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from unknown-address + 48)
|
||||
; CI: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; VI-LABEL: name: test_load_flat_v16s32_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
|
||||
; VI: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; VI: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from unknown-address + 32, align 32)
|
||||
; VI: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
|
||||
; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
|
||||
; VI: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; VI: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from unknown-address + 48)
|
||||
; VI: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; GFX9-LABEL: name: test_load_flat_v16s32_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
|
||||
; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; GFX9: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from unknown-address + 32, align 32)
|
||||
; GFX9: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<4 x s32>) from unknown-address + 32, align 32)
|
||||
; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
|
||||
; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; GFX9: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from unknown-address + 48)
|
||||
; GFX9: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p0) :: (load (<4 x s32>) from unknown-address + 48)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
%0:_(p0) = COPY $vgpr0_vgpr1
|
||||
@ -5339,7 +5339,7 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_v3s64_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
|
||||
@ -5350,7 +5350,7 @@ body: |
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; VI-LABEL: name: test_load_flat_v3s64_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
|
||||
@ -5361,7 +5361,7 @@ body: |
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; GFX9-LABEL: name: test_load_flat_v3s64_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16, align 16)
|
||||
@ -5385,7 +5385,7 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_v3s64_align8
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 8)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
|
||||
@ -5396,7 +5396,7 @@ body: |
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; VI-LABEL: name: test_load_flat_v3s64_align8
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 8)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
|
||||
@ -5407,7 +5407,7 @@ body: |
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; GFX9-LABEL: name: test_load_flat_v3s64_align8
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 8)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p0) :: (load (s64) from unknown-address + 16)
|
||||
@ -5960,26 +5960,26 @@ body: |
|
||||
|
||||
; CI-LABEL: name: test_load_flat_v4s64_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
|
||||
; VI-LABEL: name: test_load_flat_v4s64_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
|
||||
; GFX9-LABEL: name: test_load_flat_v4s64_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 32)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
|
||||
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
|
||||
%0:_(p0) = COPY $vgpr0_vgpr1
@@ -5995,26 +5995,26 @@ body: |
; CI-LABEL: name: test_load_flat_v4s64_align8
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 8)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16, align 8)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
|
||||
; VI-LABEL: name: test_load_flat_v4s64_align8
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 8)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16, align 8)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
|
||||
; GFX9-LABEL: name: test_load_flat_v4s64_align8
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (s128), align 8)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>), align 8)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16, align 8)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16, align 8)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
|
||||
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
|
||||
%0:_(p0) = COPY $vgpr0_vgpr1
@@ -6712,28 +6712,28 @@ body: |
; CI-LABEL: name: test_load_flat_v2s128_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; CI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; CI: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<2 x s128>)
|
||||
; VI-LABEL: name: test_load_flat_v2s128_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; VI: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<2 x s128>)
|
||||
; GFX9-LABEL: name: test_load_flat_v2s128_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p0) = COPY $vgpr0_vgpr1
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (s128), align 32)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>), align 32)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (<4 x s32>) from unknown-address + 16)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x s128>) = G_BITCAST [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BITCAST]](<2 x s128>)
@@ -1676,7 +1676,7 @@ body: |
; SI-LABEL: name: test_load_global_s96_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (s64), addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, align 8, addrspace 1)
@@ -1722,7 +1722,7 @@ body: |
; SI-LABEL: name: test_load_global_s96_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (s64), align 4, addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), align 4, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, addrspace 1)
@@ -2287,7 +2287,7 @@ body: |
; CI: S_NOP 0, implicit [[TRUNC]](s160)
|
||||
; SI-LABEL: name: test_load_global_s160_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
@@ -2297,7 +2297,7 @@ body: |
; SI: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; CI-HSA-LABEL: name: test_load_global_s160_align4
|
||||
; CI-HSA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; CI-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; CI-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
@@ -2307,7 +2307,7 @@ body: |
; CI-HSA: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; CI-MESA-LABEL: name: test_load_global_s160_align4
|
||||
; CI-MESA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; CI-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; CI-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
@@ -2317,7 +2317,7 @@ body: |
; CI-MESA: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; VI-LABEL: name: test_load_global_s160_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
@@ -2327,7 +2327,7 @@ body: |
; VI: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; GFX9-HSA-LABEL: name: test_load_global_s160_align4
|
||||
; GFX9-HSA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
@@ -2337,7 +2337,7 @@ body: |
; GFX9-HSA: S_NOP 0, implicit [[BITCAST]](s160)
|
||||
; GFX9-MESA-LABEL: name: test_load_global_s160_align4
|
||||
; GFX9-MESA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 16, addrspace 1)
@@ -2358,10 +2358,10 @@ body: |
; SI-LABEL: name: test_load_global_s224_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, align 4, addrspace 1)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<2 x s32>) from unknown-address + 16, align 4, addrspace 1)
|
||||
; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
|
||||
; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32) from unknown-address + 24, addrspace 1)
@@ -2380,10 +2380,10 @@ body: |
; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; CI-HSA-LABEL: name: test_load_global_s224_align4
|
||||
; CI-HSA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; CI-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; CI-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s96) from unknown-address + 16, align 4, addrspace 1)
|
||||
; CI-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
|
||||
; CI-HSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; CI-HSA: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; CI-HSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
@@ -2399,10 +2399,10 @@ body: |
; CI-HSA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; CI-MESA-LABEL: name: test_load_global_s224_align4
|
||||
; CI-MESA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; CI-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; CI-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s96) from unknown-address + 16, align 4, addrspace 1)
|
||||
; CI-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
|
||||
; CI-MESA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; CI-MESA: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; CI-MESA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
@@ -2418,10 +2418,10 @@ body: |
; CI-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; VI-LABEL: name: test_load_global_s224_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s96) from unknown-address + 16, align 4, addrspace 1)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
|
||||
; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; VI: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
@@ -2437,10 +2437,10 @@ body: |
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; GFX9-HSA-LABEL: name: test_load_global_s224_align4
|
||||
; GFX9-HSA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s96) from unknown-address + 16, align 4, addrspace 1)
|
||||
; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
|
||||
; GFX9-HSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; GFX9-HSA: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; GFX9-HSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
@@ -2456,10 +2456,10 @@ body: |
; GFX9-HSA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](s256)
|
||||
; GFX9-MESA-LABEL: name: test_load_global_s224_align4
|
||||
; GFX9-MESA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128), align 4, addrspace 1)
|
||||
; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s96) from unknown-address + 16, align 4, addrspace 1)
|
||||
; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<3 x s32>) from unknown-address + 16, align 4, addrspace 1)
|
||||
; GFX9-MESA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
|
||||
; GFX9-MESA: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](<3 x s32>)
|
||||
; GFX9-MESA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
@@ -7571,7 +7571,7 @@ body: |
; SI-LABEL: name: test_load_global_v5s16_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p1) :: (load (s64), addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p1) :: (load (<4 x s16>), addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 8, align 8, addrspace 1)
@@ -7692,7 +7692,7 @@ body: |
; SI-LABEL: name: test_load_global_v5s16_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p1) :: (load (s64), align 4, addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p1) :: (load (<4 x s16>), align 4, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s16) from unknown-address + 8, align 4, addrspace 1)
@@ -8468,7 +8468,7 @@ body: |
; SI-LABEL: name: test_load_global_v6s16_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (s64), addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, align 8, addrspace 1)
@@ -8514,7 +8514,7 @@ body: |
; SI-LABEL: name: test_load_global_v6s16_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (s64), align 4, addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), align 4, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, addrspace 1)
@@ -11671,7 +11671,7 @@ body: |
; SI-LABEL: name: test_load_global_v3s32_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (s64), align 4, addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), align 4, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, addrspace 1)
@@ -12859,7 +12859,7 @@ body: |
; SI-LABEL: name: test_load_global_v3s64_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128), align 8, addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
@@ -12870,7 +12870,7 @@ body: |
; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; CI-HSA-LABEL: name: test_load_global_v3s64_align8
|
||||
; CI-HSA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; CI-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128), align 8, addrspace 1)
|
||||
; CI-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
|
||||
; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
@@ -12881,7 +12881,7 @@ body: |
; CI-HSA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; CI-MESA-LABEL: name: test_load_global_v3s64_align8
|
||||
; CI-MESA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; CI-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128), align 8, addrspace 1)
|
||||
; CI-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
|
||||
; CI-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
@@ -12892,7 +12892,7 @@ body: |
; CI-MESA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; VI-LABEL: name: test_load_global_v3s64_align8
|
||||
; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128), align 8, addrspace 1)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
|
||||
; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
@@ -12903,7 +12903,7 @@ body: |
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; GFX9-HSA-LABEL: name: test_load_global_v3s64_align8
|
||||
; GFX9-HSA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128), align 8, addrspace 1)
|
||||
; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
|
||||
; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
@@ -12914,7 +12914,7 @@ body: |
; GFX9-HSA: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; GFX9-MESA-LABEL: name: test_load_global_v3s64_align8
|
||||
; GFX9-MESA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128), align 8, addrspace 1)
|
||||
; GFX9-MESA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 8, addrspace 1)
|
||||
; GFX9-MESA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9-MESA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9-MESA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, addrspace 1)
@@ -13126,7 +13126,7 @@ body: |
; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; CI-HSA-LABEL: name: test_load_global_v3s64_align1
|
||||
; CI-HSA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; CI-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128), align 1, addrspace 1)
|
||||
; CI-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 1, addrspace 1)
|
||||
; CI-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; CI-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CI-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, align 1, addrspace 1)
@@ -13489,7 +13489,7 @@ body: |
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
|
||||
; GFX9-HSA-LABEL: name: test_load_global_v3s64_align1
|
||||
; GFX9-HSA: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128), align 1, addrspace 1)
|
||||
; GFX9-HSA: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>), align 1, addrspace 1)
|
||||
; GFX9-HSA: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
|
||||
; GFX9-HSA: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; GFX9-HSA: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 16, align 1, addrspace 1)
@@ -17123,7 +17123,7 @@ body: |
; SI-LABEL: name: test_global_v2s96_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (s64), align 4, addrspace 1)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p1) :: (load (<2 x s32>), align 4, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p1) :: (load (s32) from unknown-address + 8, addrspace 1)
@@ -17132,7 +17132,7 @@ body: |
; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
|
||||
; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
|
||||
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s64) from unknown-address + 12, align 4, addrspace 1)
|
||||
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 1)
|
||||
; SI: [[PTR_ADD2:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
|
||||
; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p1) :: (load (s32) from unknown-address + 20, addrspace 1)
|
||||
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@@ -17223,7 +17223,7 @@ body: |
; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[EXTRACT]](<3 x s32>)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s64) from unknown-address + 12, align 4, addrspace 1)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 1)
|
||||
; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
|
||||
; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p1) :: (load (s32) from unknown-address + 20, addrspace 1)
@@ -2889,7 +2889,7 @@ body: |
; SI-LABEL: name: test_load_local_s96_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@@ -2899,7 +2899,7 @@ body: |
; SI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; CI-LABEL: name: test_load_local_s96_align8
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@@ -2909,7 +2909,7 @@ body: |
; CI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; CI-DS128-LABEL: name: test_load_local_s96_align8
|
||||
; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@@ -2919,7 +2919,7 @@ body: |
; CI-DS128: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; VI-LABEL: name: test_load_local_s96_align8
|
||||
; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@@ -2929,7 +2929,7 @@ body: |
; VI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; GFX9-LABEL: name: test_load_local_s96_align8
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@@ -2944,7 +2944,7 @@ body: |
; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; GFX10-LABEL: name: test_load_local_s96_align8
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@@ -2954,7 +2954,7 @@ body: |
; GFX10: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; GFX10-UNALIGNED-LABEL: name: test_load_local_s96_align8
|
||||
; GFX10-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@@ -2975,7 +2975,7 @@ body: |
; SI-LABEL: name: test_load_local_s96_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -2985,7 +2985,7 @@ body: |
; SI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; CI-LABEL: name: test_load_local_s96_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -2995,7 +2995,7 @@ body: |
; CI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; CI-DS128-LABEL: name: test_load_local_s96_align4
|
||||
; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -3005,7 +3005,7 @@ body: |
; CI-DS128: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; VI-LABEL: name: test_load_local_s96_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -3015,7 +3015,7 @@ body: |
; VI: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; GFX9-LABEL: name: test_load_local_s96_align4
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -3030,7 +3030,7 @@ body: |
; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; GFX10-LABEL: name: test_load_local_s96_align4
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -3040,7 +3040,7 @@ body: |
; GFX10: $vgpr0_vgpr1_vgpr2 = COPY [[BITCAST]](s96)
|
||||
; GFX10-UNALIGNED-LABEL: name: test_load_local_s96_align4
|
||||
; GFX10-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -4637,19 +4637,19 @@ body: |
; SI-LABEL: name: test_load_local_s128_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; SI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128)
|
||||
; CI-LABEL: name: test_load_local_s128_align8
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; CI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128)
@@ -4696,19 +4696,19 @@ body: |
; SI-LABEL: name: test_load_local_s128_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; SI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128)
|
||||
; CI-LABEL: name: test_load_local_s128_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; CI: [[BITCAST:%[0-9]+]]:_(s128) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](s128)
@@ -11837,7 +11837,7 @@ body: |
; SI-LABEL: name: test_load_local_v3s32_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -11846,7 +11846,7 @@ body: |
; SI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
|
||||
; CI-LABEL: name: test_load_local_v3s32_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -11855,7 +11855,7 @@ body: |
; CI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
|
||||
; CI-DS128-LABEL: name: test_load_local_v3s32_align4
|
||||
; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -11864,7 +11864,7 @@ body: |
; CI-DS128: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
|
||||
; VI-LABEL: name: test_load_local_v3s32_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -11873,7 +11873,7 @@ body: |
; VI: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
|
||||
; GFX9-LABEL: name: test_load_local_v3s32_align4
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -11886,7 +11886,7 @@ body: |
; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2 = COPY [[LOAD]](<3 x s32>)
|
||||
; GFX10-LABEL: name: test_load_local_v3s32_align4
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -11895,7 +11895,7 @@ body: |
; GFX10: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>)
|
||||
; GFX10-UNALIGNED-LABEL: name: test_load_local_v3s32_align4
|
||||
; GFX10-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@@ -11915,18 +11915,18 @@ body: |
; SI-LABEL: name: test_load_local_v4s32_align16
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 16, addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 16, addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; CI-LABEL: name: test_load_local_v4s32_align16
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 16, addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 16, addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; CI-DS128-LABEL: name: test_load_local_v4s32_align16
@@ -11966,18 +11966,18 @@ body: |
; SI-LABEL: name: test_load_local_v4s32_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; CI-LABEL: name: test_load_local_v4s32_align8
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; CI-DS128-LABEL: name: test_load_local_v4s32_align8
@@ -12017,18 +12017,18 @@ body: |
; SI-LABEL: name: test_load_local_v4s32_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; CI-LABEL: name: test_load_local_v4s32_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>)
|
||||
; CI-DS128-LABEL: name: test_load_local_v4s32_align4
@@ -13200,78 +13200,78 @@ body: |
; SI-LABEL: name: test_load_local_v8s32_align32
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 32, addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 32, addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
|
||||
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 16, align 16, addrspace 3)
|
||||
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
|
||||
; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; SI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s64) from unknown-address + 24, addrspace 3)
|
||||
; SI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<2 x s32>) from unknown-address + 24, addrspace 3)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>)
|
||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; CI-LABEL: name: test_load_local_v8s32_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 32, addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 32, addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
|
||||
; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 16, align 16, addrspace 3)
|
||||
; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
|
||||
; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; CI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s64) from unknown-address + 24, addrspace 3)
|
||||
; CI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<2 x s32>) from unknown-address + 24, addrspace 3)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; CI-DS128-LABEL: name: test_load_local_v8s32_align32
|
||||
; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI-DS128: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; CI-DS128: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; CI-DS128: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; VI-LABEL: name: test_load_local_v8s32_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; GFX9-LABEL: name: test_load_local_v8s32_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; GFX9-UNALIGNED-LABEL: name: test_load_local_v8s32_align32
|
||||
; GFX9-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; GFX10-LABEL: name: test_load_local_v8s32_align32
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; GFX10: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; GFX10: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
; GFX10-UNALIGNED-LABEL: name: test_load_local_v8s32_align32
|
||||
; GFX10-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; GFX10-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<8 x s32>)
|
||||
%0:_(p3) = COPY $vgpr0
|
||||
@ -13287,138 +13287,138 @@ body: |
; SI-LABEL: name: test_load_local_v16s32_align32
|
||||
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 32, addrspace 3)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 32, addrspace 3)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
|
||||
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 16, align 16, addrspace 3)
|
||||
; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
|
||||
; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; SI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s64) from unknown-address + 24, addrspace 3)
|
||||
; SI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<2 x s32>) from unknown-address + 24, addrspace 3)
|
||||
; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; SI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
|
||||
; SI: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load (s64) from unknown-address + 32, align 32, addrspace 3)
|
||||
; SI: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load (<2 x s32>) from unknown-address + 32, align 32, addrspace 3)
|
||||
; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
|
||||
; SI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
|
||||
; SI: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load (s64) from unknown-address + 40, addrspace 3)
|
||||
; SI: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load (<2 x s32>) from unknown-address + 40, addrspace 3)
|
||||
; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; SI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
|
||||
; SI: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load (s64) from unknown-address + 48, align 16, addrspace 3)
|
||||
; SI: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load (<2 x s32>) from unknown-address + 48, align 16, addrspace 3)
|
||||
; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
|
||||
; SI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32)
|
||||
; SI: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load (s64) from unknown-address + 56, addrspace 3)
|
||||
; SI: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load (<2 x s32>) from unknown-address + 56, addrspace 3)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>), [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
|
||||
; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; CI-LABEL: name: test_load_local_v16s32_align32
|
||||
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 32, addrspace 3)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 32, addrspace 3)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, addrspace 3)
|
||||
; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
|
||||
; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 16, align 16, addrspace 3)
|
||||
; CI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
|
||||
; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; CI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s64) from unknown-address + 24, addrspace 3)
|
||||
; CI: [[LOAD3:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<2 x s32>) from unknown-address + 24, addrspace 3)
|
||||
; CI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; CI: [[PTR_ADD3:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C3]](s32)
|
||||
; CI: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load (s64) from unknown-address + 32, align 32, addrspace 3)
|
||||
; CI: [[LOAD4:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD3]](p3) :: (load (<2 x s32>) from unknown-address + 32, align 32, addrspace 3)
|
||||
; CI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
|
||||
; CI: [[PTR_ADD4:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C4]](s32)
|
||||
; CI: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load (s64) from unknown-address + 40, addrspace 3)
|
||||
; CI: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD4]](p3) :: (load (<2 x s32>) from unknown-address + 40, addrspace 3)
|
||||
; CI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; CI: [[PTR_ADD5:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C5]](s32)
|
||||
; CI: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load (s64) from unknown-address + 48, align 16, addrspace 3)
|
||||
; CI: [[LOAD6:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD5]](p3) :: (load (<2 x s32>) from unknown-address + 48, align 16, addrspace 3)
|
||||
; CI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
|
||||
; CI: [[PTR_ADD6:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C6]](s32)
|
||||
; CI: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load (s64) from unknown-address + 56, addrspace 3)
|
||||
; CI: [[LOAD7:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD6]](p3) :: (load (<2 x s32>) from unknown-address + 56, addrspace 3)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>), [[LOAD2]](<2 x s32>), [[LOAD3]](<2 x s32>), [[LOAD4]](<2 x s32>), [[LOAD5]](<2 x s32>), [[LOAD6]](<2 x s32>), [[LOAD7]](<2 x s32>)
|
||||
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; CI-DS128-LABEL: name: test_load_local_v16s32_align32
|
||||
; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; CI-DS128: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI-DS128: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; CI-DS128: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; CI-DS128: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s128) from unknown-address + 32, align 32, addrspace 3)
|
||||
; CI-DS128: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
|
||||
; CI-DS128: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; CI-DS128: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s128) from unknown-address + 48, addrspace 3)
|
||||
; CI-DS128: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
|
||||
; CI-DS128: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; VI-LABEL: name: test_load_local_v16s32_align32
|
||||
; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; VI: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s128) from unknown-address + 32, align 32, addrspace 3)
|
||||
; VI: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
|
||||
; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; VI: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s128) from unknown-address + 48, addrspace 3)
|
||||
; VI: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; GFX9-LABEL: name: test_load_local_v16s32_align32
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; GFX9: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s128) from unknown-address + 32, align 32, addrspace 3)
|
||||
; GFX9: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
|
||||
; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; GFX9: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s128) from unknown-address + 48, addrspace 3)
|
||||
; GFX9: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; GFX9-UNALIGNED-LABEL: name: test_load_local_v16s32_align32
|
||||
; GFX9-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; GFX9-UNALIGNED: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; GFX9-UNALIGNED: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s128) from unknown-address + 32, align 32, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; GFX9-UNALIGNED: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; GFX9-UNALIGNED: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s128) from unknown-address + 48, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
|
||||
; GFX9-UNALIGNED: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; GFX10-LABEL: name: test_load_local_v16s32_align32
|
||||
; GFX10: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; GFX10: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; GFX10: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; GFX10: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; GFX10: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; GFX10: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s128) from unknown-address + 32, align 32, addrspace 3)
|
||||
; GFX10: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
|
||||
; GFX10: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; GFX10: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; GFX10: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s128) from unknown-address + 48, addrspace 3)
|
||||
; GFX10: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
|
||||
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; GFX10: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
; GFX10-UNALIGNED-LABEL: name: test_load_local_v16s32_align32
|
||||
; GFX10-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p3) :: (load (<4 x s32>), align 32, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
|
||||
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<4 x s32>) from unknown-address + 16, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
|
||||
; GFX10-UNALIGNED: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
|
||||
; GFX10-UNALIGNED: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s128) from unknown-address + 32, align 32, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD2:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<4 x s32>) from unknown-address + 32, align 32, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
|
||||
; GFX10-UNALIGNED: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C2]](s32)
|
||||
; GFX10-UNALIGNED: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (s128) from unknown-address + 48, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[LOAD3:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD2]](p3) :: (load (<4 x s32>) from unknown-address + 48, addrspace 3)
|
||||
; GFX10-UNALIGNED: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
; GFX10-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY [[CONCAT_VECTORS]](<16 x s32>)
|
||||
%0:_(p3) = COPY $vgpr0
|
||||
@ -14378,7 +14378,7 @@ body: |
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
; CI-DS128-LABEL: name: test_load_local_v3s64_align32
; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
@ -14389,7 +14389,7 @@ body: |
; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
; VI-LABEL: name: test_load_local_v3s64_align32
; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
@ -14400,7 +14400,7 @@ body: |
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
; GFX9-LABEL: name: test_load_local_v3s64_align32
; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
@ -14411,7 +14411,7 @@ body: |
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
; GFX9-UNALIGNED-LABEL: name: test_load_local_v3s64_align32
; GFX9-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
@ -14422,7 +14422,7 @@ body: |
; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
; GFX10-LABEL: name: test_load_local_v3s64_align32
; GFX10: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
@ -14433,7 +14433,7 @@ body: |
; GFX10: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<4 x s64>)
; GFX10-UNALIGNED-LABEL: name: test_load_local_v3s64_align32
; GFX10-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 16, align 16, addrspace 3)
@ -14485,50 +14485,50 @@ body: |
; CI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<4 x s64>)
; CI-DS128-LABEL: name: test_load_local_v4s64_align32
; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
; CI-DS128: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; CI-DS128: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; CI-DS128: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
; VI-LABEL: name: test_load_local_v4s64_align32
; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; VI: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
; GFX9-LABEL: name: test_load_local_v4s64_align32
; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
; GFX9-UNALIGNED-LABEL: name: test_load_local_v4s64_align32
; GFX9-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; GFX9-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX9-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX9-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
; GFX9-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX9-UNALIGNED: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX9-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
; GFX10-LABEL: name: test_load_local_v4s64_align32
; GFX10: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
; GFX10: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX10: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX10: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
; GFX10-UNALIGNED-LABEL: name: test_load_local_v4s64_align32
; GFX10-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (s128), align 32, addrspace 3)
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p3) :: (load (<2 x s64>), align 32, addrspace 3)
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (s128) from unknown-address + 16, addrspace 3)
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s64>) from unknown-address + 16, addrspace 3)
; GFX10-UNALIGNED: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
; GFX10-UNALIGNED: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[CONCAT_VECTORS]](<4 x s64>)
%0:_(p3) = COPY $vgpr0
@ -14544,19 +14544,19 @@ body: |
; SI-LABEL: name: test_load_local_v2p1_align4
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; SI: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>)
; CI-LABEL: name: test_load_local_v2p1_align4
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 8, align 4, addrspace 3)
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 8, align 4, addrspace 3)
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[LOAD]](<2 x s32>), [[LOAD1]](<2 x s32>)
; CI: [[BITCAST:%[0-9]+]]:_(<2 x p1>) = G_BITCAST [[CONCAT_VECTORS]](<4 x s32>)
; CI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BITCAST]](<2 x p1>)
@ -16955,7 +16955,7 @@ body: |
; SI-LABEL: name: test_load_local_v2s96_align4
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@ -16964,7 +16964,7 @@ body: |
; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -16976,7 +16976,7 @@ body: |
; SI: $vgpr3_vgpr4_vgpr5 = COPY [[COPY2]](s96)
; CI-LABEL: name: test_load_local_v2s96_align4
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@ -16985,7 +16985,7 @@ body: |
; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; CI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -16997,7 +16997,7 @@ body: |
; CI: $vgpr3_vgpr4_vgpr5 = COPY [[COPY2]](s96)
; CI-DS128-LABEL: name: test_load_local_v2s96_align4
; CI-DS128: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; CI-DS128: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@ -17006,7 +17006,7 @@ body: |
; CI-DS128: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; CI-DS128: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; CI-DS128: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; CI-DS128: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI-DS128: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; CI-DS128: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -17018,7 +17018,7 @@ body: |
; CI-DS128: $vgpr3_vgpr4_vgpr5 = COPY [[COPY2]](s96)
; VI-LABEL: name: test_load_local_v2s96_align4
; VI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; VI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@ -17027,7 +17027,7 @@ body: |
; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; VI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; VI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; VI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -17039,7 +17039,7 @@ body: |
; VI: $vgpr3_vgpr4_vgpr5 = COPY [[COPY2]](s96)
; GFX9-LABEL: name: test_load_local_v2s96_align4
; GFX9: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@ -17048,7 +17048,7 @@ body: |
; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX9: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; GFX9: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX9: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX9: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -17072,7 +17072,7 @@ body: |
; GFX9-UNALIGNED: $vgpr3_vgpr4_vgpr5 = COPY [[COPY2]](s96)
; GFX10-LABEL: name: test_load_local_v2s96_align4
; GFX10: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; GFX10: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@ -17081,7 +17081,7 @@ body: |
; GFX10: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; GFX10: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; GFX10: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX10: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX10: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -17093,7 +17093,7 @@ body: |
; GFX10: $vgpr3_vgpr4_vgpr5 = COPY [[COPY2]](s96)
; GFX10-UNALIGNED-LABEL: name: test_load_local_v2s96_align4
; GFX10-UNALIGNED: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 4, addrspace 3)
; GFX10-UNALIGNED: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 4, addrspace 3)
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, addrspace 3)
@ -17102,7 +17102,7 @@ body: |
; GFX10-UNALIGNED: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; GFX10-UNALIGNED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; GFX10-UNALIGNED: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; GFX10-UNALIGNED: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; GFX10-UNALIGNED: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX10-UNALIGNED: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; GFX10-UNALIGNED: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; GFX10-UNALIGNED: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -17128,7 +17128,7 @@ body: |
; SI-LABEL: name: test_load_local_v2s96_align16
; SI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 16, addrspace 3)
; SI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 16, addrspace 3)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; SI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@ -17137,7 +17137,7 @@ body: |
; SI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; SI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; SI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; SI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -17149,7 +17149,7 @@ body: |
; SI: $vgpr3_vgpr4_vgpr5 = COPY [[COPY2]](s96)
; CI-LABEL: name: test_load_local_v2s96_align16
; CI: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (s64), align 16, addrspace 3)
; CI: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p3) :: (load (<2 x s32>), align 16, addrspace 3)
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p3) :: (load (s32) from unknown-address + 8, align 8, addrspace 3)
@ -17158,7 +17158,7 @@ body: |
; CI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[BUILD_VECTOR]](<3 x s32>)
; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C1]](s32)
; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; CI: [[LOAD2:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD1]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; CI: [[PTR_ADD2:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; CI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
; CI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD2]](<2 x s32>)
@ -17174,7 +17174,7 @@ body: |
; CI-DS128: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; CI-DS128: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CI-DS128: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; CI-DS128: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; CI-DS128: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; CI-DS128: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CI-DS128: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; CI-DS128: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
@ -17191,7 +17191,7 @@ body: |
; VI: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; VI: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; VI: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
@ -17208,7 +17208,7 @@ body: |
; GFX9: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX9: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; GFX9: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
@ -17237,7 +17237,7 @@ body: |
; GFX10: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX10: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; GFX10: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; GFX10: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX10: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; GFX10: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
@ -17254,7 +17254,7 @@ body: |
; GFX10-UNALIGNED: [[BITCAST:%[0-9]+]]:_(s96) = G_BITCAST [[LOAD]](<3 x s32>)
; GFX10-UNALIGNED: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; GFX10-UNALIGNED: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (s64) from unknown-address + 12, align 4, addrspace 3)
; GFX10-UNALIGNED: [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[PTR_ADD]](p3) :: (load (<2 x s32>) from unknown-address + 12, align 4, addrspace 3)
; GFX10-UNALIGNED: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; GFX10-UNALIGNED: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; GFX10-UNALIGNED: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p3) :: (load (s32) from unknown-address + 20, addrspace 3)
@ -5478,7 +5478,7 @@ body: |
; SI-LABEL: name: test_load_private_v3s16_align8
; SI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
@ -5507,7 +5507,7 @@ body: |
; SI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
; CI-LABEL: name: test_load_private_v3s16_align8
; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
|
||||
@ -5536,7 +5536,7 @@ body: |
|
||||
; CI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
|
||||
; VI-LABEL: name: test_load_private_v3s16_align8
|
||||
; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
|
||||
@ -5565,7 +5565,7 @@ body: |
|
||||
; VI: $vgpr0_vgpr1 = COPY [[INSERT]](<4 x s16>)
|
||||
; GFX9-LABEL: name: test_load_private_v3s16_align8
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p5) :: (load (s16) from unknown-address + 4, align 4, addrspace 5)
|
||||
@ -5954,34 +5954,34 @@ body: |
|
||||
liveins: $vgpr0
|
||||
; SI-LABEL: name: test_load_private_v4s16_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
|
||||
; SI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
|
||||
; CI-LABEL: name: test_load_private_v4s16_align8
|
||||
; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
|
||||
; CI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
|
||||
; VI-LABEL: name: test_load_private_v4s16_align8
|
||||
; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
|
||||
; VI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
|
||||
; GFX9-LABEL: name: test_load_private_v4s16_align8
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), align 8, addrspace 5)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
|
||||
; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
|
||||
%0:_(p5) = COPY $vgpr0
|
||||
@ -5997,34 +5997,34 @@ body: |
|
||||
|
||||
; SI-LABEL: name: test_load_private_v4s16_align4
|
||||
; SI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
|
||||
; SI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
|
||||
; SI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
|
||||
; CI-LABEL: name: test_load_private_v4s16_align4
|
||||
; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
|
||||
; CI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
|
||||
; CI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
|
||||
; VI-LABEL: name: test_load_private_v4s16_align4
|
||||
; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
|
||||
; VI: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
|
||||
; VI: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
|
||||
; GFX9-LABEL: name: test_load_private_v4s16_align4
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (s32), addrspace 5)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p5) :: (load (<2 x s16>), addrspace 5)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[PTR_ADD]](p5) :: (load (<2 x s16>) from unknown-address + 4, addrspace 5)
|
||||
; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
|
||||
; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
|
||||
%0:_(p5) = COPY $vgpr0
|
||||
@ -9613,34 +9613,34 @@ body: |
|
||||
|
||||
; SI-LABEL: name: test_load_private_v2p3_align8
|
||||
; SI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; SI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; SI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load (p3), align 8, addrspace 5)
|
||||
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; SI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load (p3) from unknown-address + 4, addrspace 5)
|
||||
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[LOAD]](p3), [[LOAD1]](p3)
|
||||
; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x p3>)
|
||||
; CI-LABEL: name: test_load_private_v2p3_align8
|
||||
; CI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; CI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; CI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load (p3), align 8, addrspace 5)
|
||||
; CI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; CI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; CI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load (p3) from unknown-address + 4, addrspace 5)
|
||||
; CI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[LOAD]](p3), [[LOAD1]](p3)
|
||||
; CI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x p3>)
|
||||
; VI-LABEL: name: test_load_private_v2p3_align8
|
||||
; VI: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; VI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; VI: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load (p3), align 8, addrspace 5)
|
||||
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; VI: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; VI: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load (p3) from unknown-address + 4, addrspace 5)
|
||||
; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[LOAD]](p3), [[LOAD1]](p3)
|
||||
; VI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x p3>)
|
||||
; GFX9-LABEL: name: test_load_private_v2p3_align8
|
||||
; GFX9: [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load (s32), align 8, addrspace 5)
|
||||
; GFX9: [[LOAD:%[0-9]+]]:_(p3) = G_LOAD [[COPY]](p5) :: (load (p3), align 8, addrspace 5)
|
||||
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
|
||||
; GFX9: [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load (s32) from unknown-address + 4, addrspace 5)
|
||||
; GFX9: [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD]](p5) :: (load (p3) from unknown-address + 4, addrspace 5)
|
||||
; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[LOAD]](p3), [[LOAD1]](p3)
|
||||
; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x p3>)
|
||||
%0:_(p5) = COPY $vgpr0
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -145,7 +145,7 @@ body: |
|
||||
; SI: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4
|
||||
; SI: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[COPY1]](<3 x s32>), 0
|
||||
; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY1]](<3 x s32>), 64
|
||||
; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store (s64), align 4, addrspace 1)
|
||||
; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY]](p1) :: (store (<2 x s32>), align 4, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, addrspace 1)
|
||||
@ -345,7 +345,7 @@ body: |
|
||||
; SI: [[BITCAST:%[0-9]+]]:_(<3 x s32>) = G_BITCAST [[COPY]](s96)
|
||||
; SI: [[EXTRACT:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT [[BITCAST]](<3 x s32>), 0
|
||||
; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[BITCAST]](<3 x s32>), 64
|
||||
; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY1]](p1) :: (store (s64), align 16, addrspace 1)
|
||||
; SI: G_STORE [[EXTRACT]](<2 x s32>), [[COPY1]](p1) :: (store (<2 x s32>), align 16, addrspace 1)
|
||||
; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
|
||||
; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY1]], [[C]](s64)
|
||||
; SI: G_STORE [[EXTRACT1]](s32), [[PTR_ADD]](p1) :: (store (s32) into unknown-address + 8, align 8, addrspace 1)
|
||||
|
@ -797,10 +797,10 @@ define amdgpu_ps void @s_buffer_load_v16i16_vgpr_offset(<4 x i32> inreg %rsrc, i
|
||||
; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load (s128), align 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s16>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<8 x s16>), [[AMDGPU_BUFFER_LOAD1]](<8 x s16>)
|
||||
; CHECK: [[UV:%[0-9]+]]:vgpr(<8 x s16>), [[UV1:%[0-9]+]]:vgpr(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s16>)
|
||||
; CHECK: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (s128) into `<16 x i16> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; CHECK: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (<8 x s16>) into `<16 x i16> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (s128) into `<16 x i16> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; CHECK: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (<8 x s16>) into `<16 x i16> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; CHECK: S_ENDPGM 0
|
||||
; GREEDY-LABEL: name: s_buffer_load_v16i16_vgpr_offset
|
||||
; GREEDY: bb.1 (%ir-block.0):
|
||||
@ -818,10 +818,10 @@ define amdgpu_ps void @s_buffer_load_v16i16_vgpr_offset(<4 x i32> inreg %rsrc, i
|
||||
; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load (s128), align 4)
|
||||
; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s16>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<8 x s16>), [[AMDGPU_BUFFER_LOAD1]](<8 x s16>)
|
||||
; GREEDY: [[UV:%[0-9]+]]:vgpr(<8 x s16>), [[UV1:%[0-9]+]]:vgpr(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<16 x s16>)
|
||||
; GREEDY: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (s128) into `<16 x i16> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (<8 x s16>) into `<16 x i16> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; GREEDY: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (s128) into `<16 x i16> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (<8 x s16>) into `<16 x i16> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; GREEDY: S_ENDPGM 0
|
||||
%val = call <16 x i16> @llvm.amdgcn.s.buffer.load.v16i16(<4 x i32> %rsrc, i32 %soffset, i32 0)
|
||||
store <16 x i16> %val, <16 x i16> addrspace(1)* undef
|
||||
@ -848,16 +848,16 @@ define amdgpu_ps void @s_buffer_load_v32i16_vgpr_offset(<4 x i32> inreg %rsrc, i
|
||||
; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load (s128) from unknown-address + 48, align 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<32 x s16>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<8 x s16>), [[AMDGPU_BUFFER_LOAD1]](<8 x s16>), [[AMDGPU_BUFFER_LOAD2]](<8 x s16>), [[AMDGPU_BUFFER_LOAD3]](<8 x s16>)
|
||||
; CHECK: [[UV:%[0-9]+]]:vgpr(<8 x s16>), [[UV1:%[0-9]+]]:vgpr(<8 x s16>), [[UV2:%[0-9]+]]:vgpr(<8 x s16>), [[UV3:%[0-9]+]]:vgpr(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<32 x s16>)
|
||||
; CHECK: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (s128) into `<32 x i16> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (<8 x s16>) into `<32 x i16> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (s128) into `<32 x i16> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (<8 x s16>) into `<32 x i16> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; CHECK: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
|
||||
; CHECK: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store (s128) into `<32 x i16> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store (<8 x s16>) into `<32 x i16> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
|
||||
; CHECK: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store (s128) into `<32 x i16> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store (<8 x s16>) into `<32 x i16> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; CHECK: S_ENDPGM 0
|
||||
; GREEDY-LABEL: name: s_buffer_load_v32i16_vgpr_offset
|
||||
; GREEDY: bb.1 (%ir-block.0):
|
||||
@ -877,16 +877,16 @@ define amdgpu_ps void @s_buffer_load_v32i16_vgpr_offset(<4 x i32> inreg %rsrc, i
|
||||
; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<8 x s16>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load (s128) from unknown-address + 48, align 4)
|
||||
; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<32 x s16>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<8 x s16>), [[AMDGPU_BUFFER_LOAD1]](<8 x s16>), [[AMDGPU_BUFFER_LOAD2]](<8 x s16>), [[AMDGPU_BUFFER_LOAD3]](<8 x s16>)
|
||||
; GREEDY: [[UV:%[0-9]+]]:vgpr(<8 x s16>), [[UV1:%[0-9]+]]:vgpr(<8 x s16>), [[UV2:%[0-9]+]]:vgpr(<8 x s16>), [[UV3:%[0-9]+]]:vgpr(<8 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<32 x s16>)
|
||||
; GREEDY: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (s128) into `<32 x i16> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store (<8 x s16>) into `<32 x i16> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; GREEDY: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (s128) into `<32 x i16> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store (<8 x s16>) into `<32 x i16> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; GREEDY: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
|
||||
; GREEDY: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
|
||||
; GREEDY: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store (s128) into `<32 x i16> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store (<8 x s16>) into `<32 x i16> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; GREEDY: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
|
||||
; GREEDY: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
|
||||
; GREEDY: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store (s128) into `<32 x i16> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store (<8 x s16>) into `<32 x i16> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; GREEDY: S_ENDPGM 0
|
||||
%val = call <32 x i16> @llvm.amdgcn.s.buffer.load.v32i16(<4 x i32> %rsrc, i32 %soffset, i32 0)
|
||||
store <32 x i16> %val, <32 x i16> addrspace(1)* undef
|
||||
@ -911,10 +911,10 @@ define amdgpu_ps void @s_buffer_load_v4i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
|
||||
; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load (s128), align 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x s64>), [[AMDGPU_BUFFER_LOAD1]](<2 x s64>)
|
||||
; CHECK: [[UV:%[0-9]+]]:vgpr(<2 x s64>), [[UV1:%[0-9]+]]:vgpr(<2 x s64>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s64>)
|
||||
; CHECK: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (s128) into `<4 x i64> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; CHECK: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (<2 x s64>) into `<4 x i64> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (s128) into `<4 x i64> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; CHECK: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (<2 x s64>) into `<4 x i64> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; CHECK: S_ENDPGM 0
|
||||
; GREEDY-LABEL: name: s_buffer_load_v4i64_vgpr_offset
|
||||
; GREEDY: bb.1 (%ir-block.0):
|
||||
@ -932,10 +932,10 @@ define amdgpu_ps void @s_buffer_load_v4i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
|
||||
; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load (s128), align 4)
|
||||
; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x s64>), [[AMDGPU_BUFFER_LOAD1]](<2 x s64>)
|
||||
; GREEDY: [[UV:%[0-9]+]]:vgpr(<2 x s64>), [[UV1:%[0-9]+]]:vgpr(<2 x s64>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s64>)
|
||||
; GREEDY: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (s128) into `<4 x i64> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (<2 x s64>) into `<4 x i64> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; GREEDY: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (s128) into `<4 x i64> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (<2 x s64>) into `<4 x i64> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; GREEDY: S_ENDPGM 0
|
||||
%val = call <4 x i64> @llvm.amdgcn.s.buffer.load.v4i64(<4 x i32> %rsrc, i32 %soffset, i32 0)
|
||||
store <4 x i64> %val, <4 x i64> addrspace(1)* undef
|
||||
@ -962,16 +962,16 @@ define amdgpu_ps void @s_buffer_load_v8i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
|
||||
; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load (s128) from unknown-address + 48, align 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x s64>), [[AMDGPU_BUFFER_LOAD1]](<2 x s64>), [[AMDGPU_BUFFER_LOAD2]](<2 x s64>), [[AMDGPU_BUFFER_LOAD3]](<2 x s64>)
|
||||
; CHECK: [[UV:%[0-9]+]]:vgpr(<2 x s64>), [[UV1:%[0-9]+]]:vgpr(<2 x s64>), [[UV2:%[0-9]+]]:vgpr(<2 x s64>), [[UV3:%[0-9]+]]:vgpr(<2 x s64>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s64>)
|
||||
; CHECK: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (s128) into `<8 x i64> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (<2 x s64>) into `<8 x i64> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (s128) into `<8 x i64> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (<2 x s64>) into `<8 x i64> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; CHECK: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
|
||||
; CHECK: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store (s128) into `<8 x i64> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store (<2 x s64>) into `<8 x i64> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
|
||||
; CHECK: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store (s128) into `<8 x i64> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store (<2 x s64>) into `<8 x i64> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; CHECK: S_ENDPGM 0
|
||||
; GREEDY-LABEL: name: s_buffer_load_v8i64_vgpr_offset
|
||||
; GREEDY: bb.1 (%ir-block.0):
|
||||
@ -991,16 +991,16 @@ define amdgpu_ps void @s_buffer_load_v8i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
|
||||
; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load (s128) from unknown-address + 48, align 4)
|
||||
; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x s64>), [[AMDGPU_BUFFER_LOAD1]](<2 x s64>), [[AMDGPU_BUFFER_LOAD2]](<2 x s64>), [[AMDGPU_BUFFER_LOAD3]](<2 x s64>)
|
||||
; GREEDY: [[UV:%[0-9]+]]:vgpr(<2 x s64>), [[UV1:%[0-9]+]]:vgpr(<2 x s64>), [[UV2:%[0-9]+]]:vgpr(<2 x s64>), [[UV3:%[0-9]+]]:vgpr(<2 x s64>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s64>)
|
||||
; GREEDY: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (s128) into `<8 x i64> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store (<2 x s64>) into `<8 x i64> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; GREEDY: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (s128) into `<8 x i64> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store (<2 x s64>) into `<8 x i64> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; GREEDY: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
|
||||
; GREEDY: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
|
||||
; GREEDY: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store (s128) into `<8 x i64> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store (<2 x s64>) into `<8 x i64> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; GREEDY: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
|
||||
; GREEDY: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
|
||||
; GREEDY: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store (s128) into `<8 x i64> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store (<2 x s64>) into `<8 x i64> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; GREEDY: S_ENDPGM 0
|
||||
%val = call <8 x i64> @llvm.amdgcn.s.buffer.load.v8i64(<4 x i32> %rsrc, i32 %soffset, i32 0)
|
||||
store <8 x i64> %val, <8 x i64> addrspace(1)* undef
|
||||
@ -1025,10 +1025,10 @@ define amdgpu_ps void @s_buffer_load_v4p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
|
||||
; CHECK: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load (s128), align 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x p1>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x p1>), [[AMDGPU_BUFFER_LOAD1]](<2 x p1>)
|
||||
; CHECK: [[UV:%[0-9]+]]:vgpr(<2 x p1>), [[UV1:%[0-9]+]]:vgpr(<2 x p1>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x p1>)
|
||||
; CHECK: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (s128) into `<4 x i8 addrspace(1)*> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; CHECK: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `<4 x i8 addrspace(1)*> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (s128) into `<4 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; CHECK: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (<2 x p1>) into `<4 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; CHECK: S_ENDPGM 0
|
||||
; GREEDY-LABEL: name: s_buffer_load_v4p1_vgpr_offset
|
||||
; GREEDY: bb.1 (%ir-block.0):
|
||||
@ -1046,10 +1046,10 @@ define amdgpu_ps void @s_buffer_load_v4p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
|
||||
; GREEDY: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 16, 0, 0 :: (dereferenceable invariant load (s128), align 4)
|
||||
; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x p1>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x p1>), [[AMDGPU_BUFFER_LOAD1]](<2 x p1>)
|
||||
; GREEDY: [[UV:%[0-9]+]]:vgpr(<2 x p1>), [[UV1:%[0-9]+]]:vgpr(<2 x p1>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x p1>)
|
||||
; GREEDY: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (s128) into `<4 x i8 addrspace(1)*> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `<4 x i8 addrspace(1)*> addrspace(1)* undef`, align 32, addrspace 1)
|
||||
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; GREEDY: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (s128) into `<4 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (<2 x p1>) into `<4 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
|
||||
; GREEDY: S_ENDPGM 0
|
||||
%val = call <4 x i8 addrspace(1)*> @llvm.amdgcn.s.buffer.load.v4p1i8(<4 x i32> %rsrc, i32 %soffset, i32 0)
|
||||
store <4 x i8 addrspace(1)*> %val, <4 x i8 addrspace(1)*> addrspace(1)* undef
|
||||
@ -1076,16 +1076,16 @@ define amdgpu_ps void @s_buffer_load_v8p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
|
||||
; CHECK: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load (s128) from unknown-address + 48, align 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x p1>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x p1>), [[AMDGPU_BUFFER_LOAD1]](<2 x p1>), [[AMDGPU_BUFFER_LOAD2]](<2 x p1>), [[AMDGPU_BUFFER_LOAD3]](<2 x p1>)
|
||||
; CHECK: [[UV:%[0-9]+]]:vgpr(<2 x p1>), [[UV1:%[0-9]+]]:vgpr(<2 x p1>), [[UV2:%[0-9]+]]:vgpr(<2 x p1>), [[UV3:%[0-9]+]]:vgpr(<2 x p1>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x p1>)
|
||||
; CHECK: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (s128) into `<8 x i8 addrspace(1)*> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `<8 x i8 addrspace(1)*> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; CHECK: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (s128) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (<2 x p1>) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; CHECK: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
|
||||
; CHECK: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store (s128) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store (<2 x p1>) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
|
||||
; CHECK: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store (s128) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; CHECK: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store (<2 x p1>) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; CHECK: S_ENDPGM 0
|
||||
; GREEDY-LABEL: name: s_buffer_load_v8p1_vgpr_offset
|
||||
; GREEDY: bb.1 (%ir-block.0):
|
||||
@ -1105,16 +1105,16 @@ define amdgpu_ps void @s_buffer_load_v8p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
|
||||
; GREEDY: [[AMDGPU_BUFFER_LOAD3:%[0-9]+]]:vgpr(<2 x p1>) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[C1]](s32), [[COPY4]], [[C]], 48, 0, 0 :: (dereferenceable invariant load (s128) from unknown-address + 48, align 4)
|
||||
; GREEDY: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x p1>) = G_CONCAT_VECTORS [[AMDGPU_BUFFER_LOAD]](<2 x p1>), [[AMDGPU_BUFFER_LOAD1]](<2 x p1>), [[AMDGPU_BUFFER_LOAD2]](<2 x p1>), [[AMDGPU_BUFFER_LOAD3]](<2 x p1>)
|
||||
; GREEDY: [[UV:%[0-9]+]]:vgpr(<2 x p1>), [[UV1:%[0-9]+]]:vgpr(<2 x p1>), [[UV2:%[0-9]+]]:vgpr(<2 x p1>), [[UV3:%[0-9]+]]:vgpr(<2 x p1>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x p1>)
|
||||
; GREEDY: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (s128) into `<8 x i8 addrspace(1)*> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `<8 x i8 addrspace(1)*> addrspace(1)* undef`, align 64, addrspace 1)
|
||||
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
|
||||
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
|
||||
; GREEDY: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (s128) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store (<2 x p1>) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
|
||||
; GREEDY: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
|
||||
; GREEDY: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
|
||||
; GREEDY: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store (s128) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store (<2 x p1>) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
|
||||
; GREEDY: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
|
||||
; GREEDY: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
|
||||
; GREEDY: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store (s128) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; GREEDY: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store (<2 x p1>) into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
|
||||
; GREEDY: S_ENDPGM 0
|
||||
%val = call <8 x i8 addrspace(1)*> @llvm.amdgcn.s.buffer.load.v8p1i8(<4 x i32> %rsrc, i32 %soffset, i32 0)
|
||||
store <8 x i8 addrspace(1)*> %val, <8 x i8 addrspace(1)*> addrspace(1)* undef
|
||||
|
@ -116,10 +116,10 @@ body: |
|
||||
; CHECK-LABEL: name: load_global_v8i32_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128) from %ir.global.not.uniform.v8i32, align 32, addrspace 1)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v8i32, align 32, addrspace 1)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s128) from %ir.global.not.uniform.v8i32 + 16, basealign 32, addrspace 1)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v8i32 + 16, basealign 32, addrspace 1)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
%0:_(p1) = COPY $sgpr0_sgpr1
|
||||
%1:_(<8 x s32>) = G_LOAD %0 :: (load (<8 x s32>) from %ir.global.not.uniform.v8i32)
|
||||
@ -136,10 +136,10 @@ body: |
|
||||
; CHECK-LABEL: name: load_global_v4i64_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128) from %ir.global.not.uniform.v4i64, align 32, addrspace 1)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v4i64, align 32, addrspace 1)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p1) :: (load (s128) from %ir.global.not.uniform.v4i64 + 16, basealign 32, addrspace 1)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v4i64 + 16, basealign 32, addrspace 1)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
|
||||
%0:_(p1) = COPY $sgpr0_sgpr1
|
||||
%1:_(<4 x s64>) = G_LOAD %0 :: (load (<4 x s64>) from %ir.global.not.uniform.v4i64)
|
||||
@ -155,16 +155,16 @@ body: |
|
||||
; CHECK-LABEL: name: load_global_v16i32_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (s128) from %ir.global.not.uniform.v16i32, align 64, addrspace 1)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v16i32, align 64, addrspace 1)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s128) from %ir.global.not.uniform.v16i32 + 16, basealign 64, addrspace 1)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v16i32 + 16, basealign 64, addrspace 1)
|
||||
; CHECK: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s128) from %ir.global.not.uniform.v16i32 + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v16i32 + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s128) from %ir.global.not.uniform.v16i32 + 48, basealign 64, addrspace 1)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<4 x s32>) from %ir.global.not.uniform.v16i32 + 48, basealign 64, addrspace 1)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
%0:_(p1) = COPY $sgpr0_sgpr1
|
||||
%1:_(<16 x s32>) = G_LOAD %0 :: (load (<16 x s32>) from %ir.global.not.uniform.v16i32)
|
||||
@ -180,16 +180,16 @@ body: |
|
||||
; CHECK-LABEL: name: load_global_v8i64_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY [[COPY]](p1)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (s128) from %ir.global.not.uniform.v8i64, align 64, addrspace 1)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v8i64, align 64, addrspace 1)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p1) :: (load (s128) from %ir.global.not.uniform.v8i64 + 16, basealign 64, addrspace 1)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v8i64 + 16, basealign 64, addrspace 1)
|
||||
; CHECK: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s128) from %ir.global.not.uniform.v8i64 + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v8i64 + 32, align 32, basealign 64, addrspace 1)
|
||||
; CHECK: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s128) from %ir.global.not.uniform.v8i64 + 48, basealign 64, addrspace 1)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<2 x s64>) from %ir.global.not.uniform.v8i64 + 48, basealign 64, addrspace 1)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>), [[LOAD2]](<2 x s64>), [[LOAD3]](<2 x s64>)
|
||||
%0:_(p1) = COPY $sgpr0_sgpr1
|
||||
%1:_(<8 x s64>) = G_LOAD %0 :: (load (<8 x s64>) from %ir.global.not.uniform.v8i64)
|
||||
@ -261,10 +261,10 @@ body: |
|
||||
; CHECK-LABEL: name: load_constant_v8i32_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128) from %ir.constant.not.uniform.v8i32, align 32, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v8i32, align 32, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from %ir.constant.not.uniform.v8i32 + 16, basealign 32, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v8i32 + 16, basealign 32, addrspace 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
%0:_(p4) = COPY $sgpr0_sgpr1
|
||||
%1:_(<8 x s32>) = G_LOAD %0 :: (load (<8 x s32>) from %ir.constant.not.uniform.v8i32)
|
||||
@ -300,10 +300,10 @@ body: |
|
||||
; CHECK-LABEL: name: load_constant_v16i16_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[COPY]](p4) :: (load (s128) from %ir.constant.not.uniform, align 32, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[COPY]](p4) :: (load (<8 x s16>) from %ir.constant.not.uniform, align 32, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from %ir.constant.not.uniform + 16, basealign 32, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (load (<8 x s16>) from %ir.constant.not.uniform + 16, basealign 32, addrspace 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s16>) = G_CONCAT_VECTORS [[LOAD]](<8 x s16>), [[LOAD1]](<8 x s16>)
|
||||
%0:_(p4) = COPY $sgpr0_sgpr1
|
||||
%1:_(<16 x s16>) = G_LOAD %0 :: (load (<16 x s16>) from %ir.constant.not.uniform)
|
||||
@ -319,10 +319,10 @@ body: |
|
||||
; CHECK-LABEL: name: load_constant_v4i64_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (s128) from %ir.constant.not.uniform.v4i64, align 32, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v4i64, align 32, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from %ir.constant.not.uniform.v4i64 + 16, basealign 32, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v4i64 + 16, basealign 32, addrspace 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
|
||||
%0:_(p4) = COPY $sgpr0_sgpr1
|
||||
%1:_(<4 x s64>) = G_LOAD %0 :: (load (<4 x s64>) from %ir.constant.not.uniform.v4i64)
|
||||
@ -338,16 +338,16 @@ body: |
|
||||
; CHECK-LABEL: name: load_constant_v16i32_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128) from %ir.constant.not.uniform.v16i32, align 64, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v16i32, align 64, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from %ir.constant.not.uniform.v16i32 + 16, basealign 64, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v16i32 + 16, basealign 64, addrspace 4)
|
||||
; CHECK: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p4) :: (load (s128) from %ir.constant.not.uniform.v16i32 + 32, align 32, basealign 64, addrspace 4)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v16i32 + 32, align 32, basealign 64, addrspace 4)
|
||||
; CHECK: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p4) :: (load (s128) from %ir.constant.not.uniform.v16i32 + 48, basealign 64, addrspace 4)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p4) :: (load (<4 x s32>) from %ir.constant.not.uniform.v16i32 + 48, basealign 64, addrspace 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
|
||||
%0:_(p4) = COPY $sgpr0_sgpr1
|
||||
%1:_(<16 x s32>) = G_LOAD %0 :: (load (<16 x s32>) from %ir.constant.not.uniform.v16i32)
|
||||
@ -363,16 +363,16 @@ body: |
|
||||
; CHECK-LABEL: name: load_constant_v8i64_non_uniform
|
||||
; CHECK: [[COPY:%[0-9]+]]:sgpr(p4) = COPY $sgpr0_sgpr1
|
||||
; CHECK: [[COPY1:%[0-9]+]]:vgpr(p4) = COPY [[COPY]](p4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (s128) from %ir.constant.not.uniform.v8i64, align 64, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v8i64, align 64, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from %ir.constant.not.uniform.v8i64 + 16, basealign 64, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v8i64 + 16, basealign 64, addrspace 4)
|
||||
; CHECK: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
|
||||
; CHECK: [[PTR_ADD1:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD1]](p4) :: (load (s128) from %ir.constant.not.uniform.v8i64 + 32, align 32, basealign 64, addrspace 4)
|
||||
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD1]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v8i64 + 32, align 32, basealign 64, addrspace 4)
|
||||
; CHECK: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
|
||||
; CHECK: [[PTR_ADD2:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD2]](p4) :: (load (s128) from %ir.constant.not.uniform.v8i64 + 48, basealign 64, addrspace 4)
|
||||
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD2]](p4) :: (load (<2 x s64>) from %ir.constant.not.uniform.v8i64 + 48, basealign 64, addrspace 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>), [[LOAD2]](<2 x s64>), [[LOAD3]](<2 x s64>)
|
||||
%0:_(p4) = COPY $sgpr0_sgpr1
|
||||
%1:_(<8 x s64>) = G_LOAD %0 :: (load (<8 x s64>) from %ir.constant.not.uniform.v8i64)
|
||||
@ -616,10 +616,10 @@ body: |
|
||||
; CHECK-LABEL: name: load_constant_v8i32_vgpr_crash
|
||||
; CHECK: liveins: $vgpr0_vgpr1
|
||||
; CHECK: [[COPY:%[0-9]+]]:vgpr(p4) = COPY $vgpr0_vgpr1
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (s128), align 32, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load (<4 x s32>), align 32, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from unknown-address + 16, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from unknown-address + 16, addrspace 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
%0:_(p4) = COPY $vgpr0_vgpr1
|
||||
%1:_(<8 x s32>) = G_LOAD %0 :: (load (<8 x s32>), addrspace 4)
|
||||
@ -641,10 +641,10 @@ body: |
|
||||
; CHECK: bb.1:
|
||||
; CHECK: successors: %bb.1(0x80000000)
|
||||
; CHECK: [[PHI:%[0-9]+]]:vgpr(p4) = G_PHI [[COPY]](p4), %bb.0, %3(p4), %bb.1
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PHI]](p4) :: (load (s128), align 32, addrspace 4)
|
||||
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PHI]](p4) :: (load (<4 x s32>), align 32, addrspace 4)
|
||||
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[PHI]], [[C]](s64)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from unknown-address + 16, addrspace 4)
|
||||
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from unknown-address + 16, addrspace 4)
|
||||
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
|
||||
; CHECK: [[COPY2:%[0-9]+]]:sgpr(p4) = COPY [[COPY1]](p4)
|
||||
; CHECK: G_BR %bb.1
|
||||
|
@ -14,16 +14,16 @@ body: |
|
||||
; GFX7: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
|
||||
; GFX7: %in_addr:sgpr(p1) = COPY $sgpr0_sgpr1
|
||||
; GFX7: %out_addr:sgpr(p1) = COPY $sgpr2_sgpr3
|
||||
; GFX7: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %in_addr(p1) :: (load (s128), align 4, addrspace 1)
|
||||
; GFX7: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %in_addr(p1) :: (load (<4 x s32>), align 4, addrspace 1)
|
||||
; GFX7: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
|
||||
; GFX7: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C]](s64)
|
||||
; GFX7: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s128) from unknown-address + 16, align 4, addrspace 1)
|
||||
; GFX7: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<4 x s32>) from unknown-address + 16, align 4, addrspace 1)
|
||||
; GFX7: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
|
||||
; GFX7: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C1]](s64)
|
||||
; GFX7: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s128) from unknown-address + 32, align 4, addrspace 1)
|
||||
; GFX7: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<4 x s32>) from unknown-address + 32, align 4, addrspace 1)
|
||||
; GFX7: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
|
||||
; GFX7: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C2]](s64)
|
||||
; GFX7: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s128) from unknown-address + 48, align 4, addrspace 1)
|
||||
; GFX7: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<4 x s32>) from unknown-address + 48, align 4, addrspace 1)
|
||||
; GFX7: %load:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX7: %load0_3:vgpr(<4 x s32>), %load4_7:vgpr(<4 x s32>), %load8_11:vgpr(<4 x s32>), %load12_15:vgpr(<4 x s32>) = G_UNMERGE_VALUES %load(<16 x s32>)
; GFX7: G_STORE %load0_3(<4 x s32>), %out_addr(p1) :: (store (<4 x s32>), align 4, addrspace 1)
@ -42,16 +42,16 @@ body: |
; GFX1010: %in_addr:sgpr(p1) = COPY $sgpr0_sgpr1
; GFX1010: %out_addr:sgpr(p1) = COPY $sgpr2_sgpr3
; GFX1010: [[COPY:%[0-9]+]]:vgpr(p1) = COPY %in_addr(p1)
; GFX1010: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %in_addr(p1) :: (load (s128), align 4, addrspace 1)
; GFX1010: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %in_addr(p1) :: (load (<4 x s32>), align 4, addrspace 1)
; GFX1010: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; GFX1010: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C]](s64)
; GFX1010: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (s128) from unknown-address + 16, align 4, addrspace 1)
; GFX1010: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load (<4 x s32>) from unknown-address + 16, align 4, addrspace 1)
; GFX1010: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; GFX1010: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C1]](s64)
; GFX1010: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (s128) from unknown-address + 32, align 4, addrspace 1)
; GFX1010: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load (<4 x s32>) from unknown-address + 32, align 4, addrspace 1)
; GFX1010: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; GFX1010: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD %in_addr, [[C2]](s64)
; GFX1010: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (s128) from unknown-address + 48, align 4, addrspace 1)
; GFX1010: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load (<4 x s32>) from unknown-address + 48, align 4, addrspace 1)
; GFX1010: %load:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
; GFX1010: %load0_3:vgpr(<4 x s32>), %load4_7:vgpr(<4 x s32>), %load8_11:vgpr(<4 x s32>), %load12_15:vgpr(<4 x s32>) = G_UNMERGE_VALUES %load(<16 x s32>)
; GFX1010: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY %out_addr(p1)
@ -98,10 +98,10 @@ body: |
; GFX7: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; GFX7: %ptr:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX7: %out:sgpr(p1) = COPY $sgpr2_sgpr3
; GFX7: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %ptr(p4) :: (load (s128), align 1, addrspace 4)
; GFX7: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %ptr(p4) :: (load (<4 x s32>), align 1, addrspace 4)
; GFX7: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; GFX7: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD %ptr, [[C]](s64)
; GFX7: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from unknown-address + 16, align 1, addrspace 4)
; GFX7: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from unknown-address + 16, align 1, addrspace 4)
; GFX7: %load:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX7: %load0_3:vgpr(<4 x s32>), %load4_7:vgpr(<4 x s32>) = G_UNMERGE_VALUES %load(<8 x s32>)
; GFX7: G_STORE %load0_3(<4 x s32>), %out(p1) :: (store (<4 x s32>), align 32, addrspace 1)
@ -114,10 +114,10 @@ body: |
; GFX1010: %ptr:sgpr(p4) = COPY $sgpr0_sgpr1
; GFX1010: %out:sgpr(p1) = COPY $sgpr2_sgpr3
; GFX1010: [[COPY:%[0-9]+]]:vgpr(p4) = COPY %ptr(p4)
; GFX1010: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %ptr(p4) :: (load (s128), align 1, addrspace 4)
; GFX1010: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD %ptr(p4) :: (load (<4 x s32>), align 1, addrspace 4)
; GFX1010: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; GFX1010: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD %ptr, [[C]](s64)
; GFX1010: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (s128) from unknown-address + 16, align 1, addrspace 4)
; GFX1010: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load (<4 x s32>) from unknown-address + 16, align 1, addrspace 4)
; GFX1010: %load:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
; GFX1010: %load0_3:vgpr(<4 x s32>), %load4_7:vgpr(<4 x s32>) = G_UNMERGE_VALUES %load(<8 x s32>)
; GFX1010: [[COPY1:%[0-9]+]]:vgpr(p1) = COPY %out(p1)