
[CodeGen] Try to make the print of memory operand alignment a little more user friendly.

Memory operands store a base alignment that does not factor in
the effect of the offset on the alignment.

Previously, the printing code printed the base alignment only if
it was different from the size. If there was an offset, the reader
had to work out the effective alignment themselves. This has
confused me before, and someone else was recently confused by it
on IRC.

This patch prints the possibly offset-adjusted alignment if it is
different from the size, and prints the base alignment if it
differs from that alignment. The MIR parser has been updated to
read basealign in addition to align.
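
For example, a 16-byte load at offset 127 from a base with alignment 4
now prints "align 1, basealign 4" instead of just "align 4".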

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D94344
Craig Topper 2021-01-11 19:58:35 -08:00
parent 0452f12eb6
commit aecf5b1559
22 changed files with 343 additions and 325 deletions


@@ -249,6 +249,7 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) {
.Case("dereferenceable", MIToken::kw_dereferenceable)
.Case("invariant", MIToken::kw_invariant)
.Case("align", MIToken::kw_align)
.Case("basealign", MIToken::kw_align)
.Case("addrspace", MIToken::kw_addrspace)
.Case("stack", MIToken::kw_stack)
.Case("got", MIToken::kw_got)


@@ -104,6 +104,7 @@ struct MIToken {
kw_non_temporal,
kw_invariant,
kw_align,
kw_basealign,
kw_addrspace,
kw_stack,
kw_got,


@@ -2726,7 +2726,7 @@ bool MIParser::parseOffset(int64_t &Offset) {
}
bool MIParser::parseAlignment(unsigned &Alignment) {
assert(Token.is(MIToken::kw_align));
assert(Token.is(MIToken::kw_align) || Token.is(MIToken::kw_basealign));
lex();
if (Token.isNot(MIToken::IntegerLiteral) || Token.integerValue().isSigned())
return error("expected an integer literal after 'align'");
@@ -3074,6 +3074,12 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
while (consumeIfPresent(MIToken::comma)) {
switch (Token.kind()) {
case MIToken::kw_align:
// align is printed if it is different than size.
if (parseAlignment(BaseAlignment))
return true;
break;
case MIToken::kw_basealign:
// basealign is printed if it is different than align.
if (parseAlignment(BaseAlignment))
return true;
break;
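
Note that both keywords parse into the same BaseAlignment value, so for an
operand such as "(load 1 from %ir.ptr + 2, align 2, basealign 4)" (see the
tests below) the basealign that is parsed last is what the operand records;
the offset-adjusted alignment is then rederived from that base alignment
and the offset.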


@@ -1162,8 +1162,10 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
}
}
MachineOperand::printOperandOffset(OS, getOffset());
if (getBaseAlign() != getSize())
OS << ", align " << getBaseAlign().value();
if (getAlign() != getSize())
OS << ", align " << getAlign().value();
if (getAlign() != getBaseAlign())
OS << ", basealign " << getBaseAlign().value();
auto AAInfo = getAAInfo();
if (AAInfo.TBAA) {
OS << ", !tbaa ";


@@ -256,9 +256,9 @@ body: |
; CHECK: G_STORE [[LOAD7]](s128), [[GEP13]](p0) :: (store 16 into %ir.0 + 112, align 4)
; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
; CHECK: [[GEP14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[GEP14]](p0) :: (load 16 from %ir.1 + 127, align 4)
; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[GEP14]](p0) :: (load 16 from %ir.1 + 127, align 1, basealign 4)
; CHECK: [[GEP15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK: G_STORE [[LOAD8]](s128), [[GEP15]](p0) :: (store 16 into %ir.0 + 127, align 4)
; CHECK: G_STORE [[LOAD8]](s128), [[GEP15]](p0) :: (store 16 into %ir.0 + 127, align 1, basealign 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1


@@ -27,7 +27,7 @@ body: |
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load 2 from %ir.ptr, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.ptr + 2, align 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.ptr + 2, align 2, basealign 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C2]](s64)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
@@ -35,7 +35,7 @@ body: |
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s64)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store 2 into %ir.ptr2, align 4)
; CHECK: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.ptr2 + 2, align 4)
; CHECK: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.ptr2 + 2, align 2, basealign 4)
; CHECK: $w0 = COPY [[C]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0


@@ -1498,194 +1498,194 @@ body: |
; CHECK: G_STORE [[UV]](p3), [[FRAME_INDEX]](p5) :: (store 4 into %stack.0, align 256, addrspace 5)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
; CHECK: G_STORE [[UV1]](p3), [[PTR_ADD3]](p5) :: (store 4 into %stack.0 + 4, align 256, addrspace 5)
; CHECK: G_STORE [[UV1]](p3), [[PTR_ADD3]](p5) :: (store 4 into %stack.0 + 4, basealign 256, addrspace 5)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
; CHECK: G_STORE [[UV2]](p3), [[PTR_ADD4]](p5) :: (store 4 into %stack.0 + 8, align 256, addrspace 5)
; CHECK: G_STORE [[UV2]](p3), [[PTR_ADD4]](p5) :: (store 4 into %stack.0 + 8, align 8, basealign 256, addrspace 5)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
; CHECK: G_STORE [[UV3]](p3), [[PTR_ADD5]](p5) :: (store 4 into %stack.0 + 12, align 256, addrspace 5)
; CHECK: G_STORE [[UV3]](p3), [[PTR_ADD5]](p5) :: (store 4 into %stack.0 + 12, basealign 256, addrspace 5)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
; CHECK: G_STORE [[UV4]](p3), [[PTR_ADD6]](p5) :: (store 4 into %stack.0 + 16, align 256, addrspace 5)
; CHECK: G_STORE [[UV4]](p3), [[PTR_ADD6]](p5) :: (store 4 into %stack.0 + 16, align 16, basealign 256, addrspace 5)
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
; CHECK: G_STORE [[UV5]](p3), [[PTR_ADD7]](p5) :: (store 4 into %stack.0 + 20, align 256, addrspace 5)
; CHECK: G_STORE [[UV5]](p3), [[PTR_ADD7]](p5) :: (store 4 into %stack.0 + 20, basealign 256, addrspace 5)
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
; CHECK: G_STORE [[UV6]](p3), [[PTR_ADD8]](p5) :: (store 4 into %stack.0 + 24, align 256, addrspace 5)
; CHECK: G_STORE [[UV6]](p3), [[PTR_ADD8]](p5) :: (store 4 into %stack.0 + 24, align 8, basealign 256, addrspace 5)
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
; CHECK: G_STORE [[UV7]](p3), [[PTR_ADD9]](p5) :: (store 4 into %stack.0 + 28, align 256, addrspace 5)
; CHECK: G_STORE [[UV7]](p3), [[PTR_ADD9]](p5) :: (store 4 into %stack.0 + 28, basealign 256, addrspace 5)
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
; CHECK: G_STORE [[UV8]](p3), [[PTR_ADD10]](p5) :: (store 4 into %stack.0 + 32, align 256, addrspace 5)
; CHECK: G_STORE [[UV8]](p3), [[PTR_ADD10]](p5) :: (store 4 into %stack.0 + 32, align 32, basealign 256, addrspace 5)
; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
; CHECK: G_STORE [[UV9]](p3), [[PTR_ADD11]](p5) :: (store 4 into %stack.0 + 36, align 256, addrspace 5)
; CHECK: G_STORE [[UV9]](p3), [[PTR_ADD11]](p5) :: (store 4 into %stack.0 + 36, basealign 256, addrspace 5)
; CHECK: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
; CHECK: G_STORE [[UV10]](p3), [[PTR_ADD12]](p5) :: (store 4 into %stack.0 + 40, align 256, addrspace 5)
; CHECK: G_STORE [[UV10]](p3), [[PTR_ADD12]](p5) :: (store 4 into %stack.0 + 40, align 8, basealign 256, addrspace 5)
; CHECK: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
; CHECK: G_STORE [[UV11]](p3), [[PTR_ADD13]](p5) :: (store 4 into %stack.0 + 44, align 256, addrspace 5)
; CHECK: G_STORE [[UV11]](p3), [[PTR_ADD13]](p5) :: (store 4 into %stack.0 + 44, basealign 256, addrspace 5)
; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
; CHECK: G_STORE [[UV12]](p3), [[PTR_ADD14]](p5) :: (store 4 into %stack.0 + 48, align 256, addrspace 5)
; CHECK: G_STORE [[UV12]](p3), [[PTR_ADD14]](p5) :: (store 4 into %stack.0 + 48, align 16, basealign 256, addrspace 5)
; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
; CHECK: G_STORE [[UV13]](p3), [[PTR_ADD15]](p5) :: (store 4 into %stack.0 + 52, align 256, addrspace 5)
; CHECK: G_STORE [[UV13]](p3), [[PTR_ADD15]](p5) :: (store 4 into %stack.0 + 52, basealign 256, addrspace 5)
; CHECK: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
; CHECK: G_STORE [[UV14]](p3), [[PTR_ADD16]](p5) :: (store 4 into %stack.0 + 56, align 256, addrspace 5)
; CHECK: G_STORE [[UV14]](p3), [[PTR_ADD16]](p5) :: (store 4 into %stack.0 + 56, align 8, basealign 256, addrspace 5)
; CHECK: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
; CHECK: G_STORE [[UV15]](p3), [[PTR_ADD17]](p5) :: (store 4 into %stack.0 + 60, align 256, addrspace 5)
; CHECK: G_STORE [[UV15]](p3), [[PTR_ADD17]](p5) :: (store 4 into %stack.0 + 60, basealign 256, addrspace 5)
; CHECK: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
; CHECK: G_STORE [[UV16]](p3), [[PTR_ADD18]](p5) :: (store 4 into %stack.0 + 64, align 256, addrspace 5)
; CHECK: G_STORE [[UV16]](p3), [[PTR_ADD18]](p5) :: (store 4 into %stack.0 + 64, align 64, basealign 256, addrspace 5)
; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 68
; CHECK: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
; CHECK: G_STORE [[UV17]](p3), [[PTR_ADD19]](p5) :: (store 4 into %stack.0 + 68, align 256, addrspace 5)
; CHECK: G_STORE [[UV17]](p3), [[PTR_ADD19]](p5) :: (store 4 into %stack.0 + 68, basealign 256, addrspace 5)
; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 72
; CHECK: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
; CHECK: G_STORE [[UV18]](p3), [[PTR_ADD20]](p5) :: (store 4 into %stack.0 + 72, align 256, addrspace 5)
; CHECK: G_STORE [[UV18]](p3), [[PTR_ADD20]](p5) :: (store 4 into %stack.0 + 72, align 8, basealign 256, addrspace 5)
; CHECK: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
; CHECK: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
; CHECK: G_STORE [[UV19]](p3), [[PTR_ADD21]](p5) :: (store 4 into %stack.0 + 76, align 256, addrspace 5)
; CHECK: G_STORE [[UV19]](p3), [[PTR_ADD21]](p5) :: (store 4 into %stack.0 + 76, basealign 256, addrspace 5)
; CHECK: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 80
; CHECK: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
; CHECK: G_STORE [[UV20]](p3), [[PTR_ADD22]](p5) :: (store 4 into %stack.0 + 80, align 256, addrspace 5)
; CHECK: G_STORE [[UV20]](p3), [[PTR_ADD22]](p5) :: (store 4 into %stack.0 + 80, align 16, basealign 256, addrspace 5)
; CHECK: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 84
; CHECK: [[PTR_ADD23:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
; CHECK: G_STORE [[UV21]](p3), [[PTR_ADD23]](p5) :: (store 4 into %stack.0 + 84, align 256, addrspace 5)
; CHECK: G_STORE [[UV21]](p3), [[PTR_ADD23]](p5) :: (store 4 into %stack.0 + 84, basealign 256, addrspace 5)
; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 88
; CHECK: [[PTR_ADD24:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
; CHECK: G_STORE [[UV22]](p3), [[PTR_ADD24]](p5) :: (store 4 into %stack.0 + 88, align 256, addrspace 5)
; CHECK: G_STORE [[UV22]](p3), [[PTR_ADD24]](p5) :: (store 4 into %stack.0 + 88, align 8, basealign 256, addrspace 5)
; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 92
; CHECK: [[PTR_ADD25:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
; CHECK: G_STORE [[UV23]](p3), [[PTR_ADD25]](p5) :: (store 4 into %stack.0 + 92, align 256, addrspace 5)
; CHECK: G_STORE [[UV23]](p3), [[PTR_ADD25]](p5) :: (store 4 into %stack.0 + 92, basealign 256, addrspace 5)
; CHECK: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 96
; CHECK: [[PTR_ADD26:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
; CHECK: G_STORE [[UV24]](p3), [[PTR_ADD26]](p5) :: (store 4 into %stack.0 + 96, align 256, addrspace 5)
; CHECK: G_STORE [[UV24]](p3), [[PTR_ADD26]](p5) :: (store 4 into %stack.0 + 96, align 32, basealign 256, addrspace 5)
; CHECK: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; CHECK: [[PTR_ADD27:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
; CHECK: G_STORE [[UV25]](p3), [[PTR_ADD27]](p5) :: (store 4 into %stack.0 + 100, align 256, addrspace 5)
; CHECK: G_STORE [[UV25]](p3), [[PTR_ADD27]](p5) :: (store 4 into %stack.0 + 100, basealign 256, addrspace 5)
; CHECK: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
; CHECK: [[PTR_ADD28:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
; CHECK: G_STORE [[UV26]](p3), [[PTR_ADD28]](p5) :: (store 4 into %stack.0 + 104, align 256, addrspace 5)
; CHECK: G_STORE [[UV26]](p3), [[PTR_ADD28]](p5) :: (store 4 into %stack.0 + 104, align 8, basealign 256, addrspace 5)
; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 108
; CHECK: [[PTR_ADD29:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
; CHECK: G_STORE [[UV27]](p3), [[PTR_ADD29]](p5) :: (store 4 into %stack.0 + 108, align 256, addrspace 5)
; CHECK: G_STORE [[UV27]](p3), [[PTR_ADD29]](p5) :: (store 4 into %stack.0 + 108, basealign 256, addrspace 5)
; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 112
; CHECK: [[PTR_ADD30:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
; CHECK: G_STORE [[UV28]](p3), [[PTR_ADD30]](p5) :: (store 4 into %stack.0 + 112, align 256, addrspace 5)
; CHECK: G_STORE [[UV28]](p3), [[PTR_ADD30]](p5) :: (store 4 into %stack.0 + 112, align 16, basealign 256, addrspace 5)
; CHECK: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 116
; CHECK: [[PTR_ADD31:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
; CHECK: G_STORE [[UV29]](p3), [[PTR_ADD31]](p5) :: (store 4 into %stack.0 + 116, align 256, addrspace 5)
; CHECK: G_STORE [[UV29]](p3), [[PTR_ADD31]](p5) :: (store 4 into %stack.0 + 116, basealign 256, addrspace 5)
; CHECK: [[C32:%[0-9]+]]:_(s32) = G_CONSTANT i32 120
; CHECK: [[PTR_ADD32:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
; CHECK: G_STORE [[UV30]](p3), [[PTR_ADD32]](p5) :: (store 4 into %stack.0 + 120, align 256, addrspace 5)
; CHECK: G_STORE [[UV30]](p3), [[PTR_ADD32]](p5) :: (store 4 into %stack.0 + 120, align 8, basealign 256, addrspace 5)
; CHECK: [[C33:%[0-9]+]]:_(s32) = G_CONSTANT i32 124
; CHECK: [[PTR_ADD33:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
; CHECK: G_STORE [[UV31]](p3), [[PTR_ADD33]](p5) :: (store 4 into %stack.0 + 124, align 256, addrspace 5)
; CHECK: G_STORE [[UV31]](p3), [[PTR_ADD33]](p5) :: (store 4 into %stack.0 + 124, basealign 256, addrspace 5)
; CHECK: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
; CHECK: [[PTR_ADD34:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
; CHECK: G_STORE [[UV32]](p3), [[PTR_ADD34]](p5) :: (store 4 into %stack.0 + 128, align 256, addrspace 5)
; CHECK: G_STORE [[UV32]](p3), [[PTR_ADD34]](p5) :: (store 4 into %stack.0 + 128, align 128, basealign 256, addrspace 5)
; CHECK: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 132
; CHECK: [[PTR_ADD35:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(p5) = COPY [[PTR_ADD35]](p5)
; CHECK: G_STORE [[UV33]](p3), [[COPY1]](p5) :: (store 4 into %stack.0 + 132, align 256, addrspace 5)
; CHECK: G_STORE [[UV33]](p3), [[COPY1]](p5) :: (store 4 into %stack.0 + 132, basealign 256, addrspace 5)
; CHECK: [[C36:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
; CHECK: [[PTR_ADD36:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
; CHECK: G_STORE [[UV34]](p3), [[PTR_ADD36]](p5) :: (store 4 into %stack.0 + 136, align 256, addrspace 5)
; CHECK: G_STORE [[UV34]](p3), [[PTR_ADD36]](p5) :: (store 4 into %stack.0 + 136, align 8, basealign 256, addrspace 5)
; CHECK: [[C37:%[0-9]+]]:_(s32) = G_CONSTANT i32 140
; CHECK: [[PTR_ADD37:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
; CHECK: G_STORE [[UV35]](p3), [[PTR_ADD37]](p5) :: (store 4 into %stack.0 + 140, align 256, addrspace 5)
; CHECK: G_STORE [[UV35]](p3), [[PTR_ADD37]](p5) :: (store 4 into %stack.0 + 140, basealign 256, addrspace 5)
; CHECK: [[C38:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
; CHECK: [[PTR_ADD38:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
; CHECK: G_STORE [[UV36]](p3), [[PTR_ADD38]](p5) :: (store 4 into %stack.0 + 144, align 256, addrspace 5)
; CHECK: G_STORE [[UV36]](p3), [[PTR_ADD38]](p5) :: (store 4 into %stack.0 + 144, align 16, basealign 256, addrspace 5)
; CHECK: [[C39:%[0-9]+]]:_(s32) = G_CONSTANT i32 148
; CHECK: [[PTR_ADD39:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
; CHECK: G_STORE [[UV37]](p3), [[PTR_ADD39]](p5) :: (store 4 into %stack.0 + 148, align 256, addrspace 5)
; CHECK: G_STORE [[UV37]](p3), [[PTR_ADD39]](p5) :: (store 4 into %stack.0 + 148, basealign 256, addrspace 5)
; CHECK: [[C40:%[0-9]+]]:_(s32) = G_CONSTANT i32 152
; CHECK: [[PTR_ADD40:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
; CHECK: G_STORE [[UV38]](p3), [[PTR_ADD40]](p5) :: (store 4 into %stack.0 + 152, align 256, addrspace 5)
; CHECK: G_STORE [[UV38]](p3), [[PTR_ADD40]](p5) :: (store 4 into %stack.0 + 152, align 8, basealign 256, addrspace 5)
; CHECK: [[C41:%[0-9]+]]:_(s32) = G_CONSTANT i32 156
; CHECK: [[PTR_ADD41:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
; CHECK: G_STORE [[UV39]](p3), [[PTR_ADD41]](p5) :: (store 4 into %stack.0 + 156, align 256, addrspace 5)
; CHECK: G_STORE [[UV39]](p3), [[PTR_ADD41]](p5) :: (store 4 into %stack.0 + 156, basealign 256, addrspace 5)
; CHECK: [[C42:%[0-9]+]]:_(s32) = G_CONSTANT i32 160
; CHECK: [[PTR_ADD42:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
; CHECK: G_STORE [[UV40]](p3), [[PTR_ADD42]](p5) :: (store 4 into %stack.0 + 160, align 256, addrspace 5)
; CHECK: G_STORE [[UV40]](p3), [[PTR_ADD42]](p5) :: (store 4 into %stack.0 + 160, align 32, basealign 256, addrspace 5)
; CHECK: [[C43:%[0-9]+]]:_(s32) = G_CONSTANT i32 164
; CHECK: [[PTR_ADD43:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
; CHECK: G_STORE [[UV41]](p3), [[PTR_ADD43]](p5) :: (store 4 into %stack.0 + 164, align 256, addrspace 5)
; CHECK: G_STORE [[UV41]](p3), [[PTR_ADD43]](p5) :: (store 4 into %stack.0 + 164, basealign 256, addrspace 5)
; CHECK: [[C44:%[0-9]+]]:_(s32) = G_CONSTANT i32 168
; CHECK: [[PTR_ADD44:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
; CHECK: G_STORE [[UV42]](p3), [[PTR_ADD44]](p5) :: (store 4 into %stack.0 + 168, align 256, addrspace 5)
; CHECK: G_STORE [[UV42]](p3), [[PTR_ADD44]](p5) :: (store 4 into %stack.0 + 168, align 8, basealign 256, addrspace 5)
; CHECK: [[C45:%[0-9]+]]:_(s32) = G_CONSTANT i32 172
; CHECK: [[PTR_ADD45:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
; CHECK: G_STORE [[UV43]](p3), [[PTR_ADD45]](p5) :: (store 4 into %stack.0 + 172, align 256, addrspace 5)
; CHECK: G_STORE [[UV43]](p3), [[PTR_ADD45]](p5) :: (store 4 into %stack.0 + 172, basealign 256, addrspace 5)
; CHECK: [[C46:%[0-9]+]]:_(s32) = G_CONSTANT i32 176
; CHECK: [[PTR_ADD46:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
; CHECK: G_STORE [[UV44]](p3), [[PTR_ADD46]](p5) :: (store 4 into %stack.0 + 176, align 256, addrspace 5)
; CHECK: G_STORE [[UV44]](p3), [[PTR_ADD46]](p5) :: (store 4 into %stack.0 + 176, align 16, basealign 256, addrspace 5)
; CHECK: [[C47:%[0-9]+]]:_(s32) = G_CONSTANT i32 180
; CHECK: [[PTR_ADD47:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
; CHECK: G_STORE [[UV45]](p3), [[PTR_ADD47]](p5) :: (store 4 into %stack.0 + 180, align 256, addrspace 5)
; CHECK: G_STORE [[UV45]](p3), [[PTR_ADD47]](p5) :: (store 4 into %stack.0 + 180, basealign 256, addrspace 5)
; CHECK: [[C48:%[0-9]+]]:_(s32) = G_CONSTANT i32 184
; CHECK: [[PTR_ADD48:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
; CHECK: G_STORE [[UV46]](p3), [[PTR_ADD48]](p5) :: (store 4 into %stack.0 + 184, align 256, addrspace 5)
; CHECK: G_STORE [[UV46]](p3), [[PTR_ADD48]](p5) :: (store 4 into %stack.0 + 184, align 8, basealign 256, addrspace 5)
; CHECK: [[C49:%[0-9]+]]:_(s32) = G_CONSTANT i32 188
; CHECK: [[PTR_ADD49:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
; CHECK: G_STORE [[UV47]](p3), [[PTR_ADD49]](p5) :: (store 4 into %stack.0 + 188, align 256, addrspace 5)
; CHECK: G_STORE [[UV47]](p3), [[PTR_ADD49]](p5) :: (store 4 into %stack.0 + 188, basealign 256, addrspace 5)
; CHECK: [[C50:%[0-9]+]]:_(s32) = G_CONSTANT i32 192
; CHECK: [[PTR_ADD50:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
; CHECK: G_STORE [[UV48]](p3), [[PTR_ADD50]](p5) :: (store 4 into %stack.0 + 192, align 256, addrspace 5)
; CHECK: G_STORE [[UV48]](p3), [[PTR_ADD50]](p5) :: (store 4 into %stack.0 + 192, align 64, basealign 256, addrspace 5)
; CHECK: [[C51:%[0-9]+]]:_(s32) = G_CONSTANT i32 196
; CHECK: [[PTR_ADD51:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
; CHECK: G_STORE [[UV49]](p3), [[PTR_ADD51]](p5) :: (store 4 into %stack.0 + 196, align 256, addrspace 5)
; CHECK: G_STORE [[UV49]](p3), [[PTR_ADD51]](p5) :: (store 4 into %stack.0 + 196, basealign 256, addrspace 5)
; CHECK: [[C52:%[0-9]+]]:_(s32) = G_CONSTANT i32 200
; CHECK: [[PTR_ADD52:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
; CHECK: G_STORE [[UV50]](p3), [[PTR_ADD52]](p5) :: (store 4 into %stack.0 + 200, align 256, addrspace 5)
; CHECK: G_STORE [[UV50]](p3), [[PTR_ADD52]](p5) :: (store 4 into %stack.0 + 200, align 8, basealign 256, addrspace 5)
; CHECK: [[C53:%[0-9]+]]:_(s32) = G_CONSTANT i32 204
; CHECK: [[PTR_ADD53:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
; CHECK: G_STORE [[UV51]](p3), [[PTR_ADD53]](p5) :: (store 4 into %stack.0 + 204, align 256, addrspace 5)
; CHECK: G_STORE [[UV51]](p3), [[PTR_ADD53]](p5) :: (store 4 into %stack.0 + 204, basealign 256, addrspace 5)
; CHECK: [[C54:%[0-9]+]]:_(s32) = G_CONSTANT i32 208
; CHECK: [[PTR_ADD54:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
; CHECK: G_STORE [[UV52]](p3), [[PTR_ADD54]](p5) :: (store 4 into %stack.0 + 208, align 256, addrspace 5)
; CHECK: G_STORE [[UV52]](p3), [[PTR_ADD54]](p5) :: (store 4 into %stack.0 + 208, align 16, basealign 256, addrspace 5)
; CHECK: [[C55:%[0-9]+]]:_(s32) = G_CONSTANT i32 212
; CHECK: [[PTR_ADD55:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
; CHECK: G_STORE [[UV53]](p3), [[PTR_ADD55]](p5) :: (store 4 into %stack.0 + 212, align 256, addrspace 5)
; CHECK: G_STORE [[UV53]](p3), [[PTR_ADD55]](p5) :: (store 4 into %stack.0 + 212, basealign 256, addrspace 5)
; CHECK: [[C56:%[0-9]+]]:_(s32) = G_CONSTANT i32 216
; CHECK: [[PTR_ADD56:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
; CHECK: G_STORE [[UV54]](p3), [[PTR_ADD56]](p5) :: (store 4 into %stack.0 + 216, align 256, addrspace 5)
; CHECK: G_STORE [[UV54]](p3), [[PTR_ADD56]](p5) :: (store 4 into %stack.0 + 216, align 8, basealign 256, addrspace 5)
; CHECK: [[C57:%[0-9]+]]:_(s32) = G_CONSTANT i32 220
; CHECK: [[PTR_ADD57:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
; CHECK: G_STORE [[UV55]](p3), [[PTR_ADD57]](p5) :: (store 4 into %stack.0 + 220, align 256, addrspace 5)
; CHECK: G_STORE [[UV55]](p3), [[PTR_ADD57]](p5) :: (store 4 into %stack.0 + 220, basealign 256, addrspace 5)
; CHECK: [[C58:%[0-9]+]]:_(s32) = G_CONSTANT i32 224
; CHECK: [[PTR_ADD58:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
; CHECK: G_STORE [[UV56]](p3), [[PTR_ADD58]](p5) :: (store 4 into %stack.0 + 224, align 256, addrspace 5)
; CHECK: G_STORE [[UV56]](p3), [[PTR_ADD58]](p5) :: (store 4 into %stack.0 + 224, align 32, basealign 256, addrspace 5)
; CHECK: [[C59:%[0-9]+]]:_(s32) = G_CONSTANT i32 228
; CHECK: [[PTR_ADD59:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
; CHECK: G_STORE [[UV57]](p3), [[PTR_ADD59]](p5) :: (store 4 into %stack.0 + 228, align 256, addrspace 5)
; CHECK: G_STORE [[UV57]](p3), [[PTR_ADD59]](p5) :: (store 4 into %stack.0 + 228, basealign 256, addrspace 5)
; CHECK: [[C60:%[0-9]+]]:_(s32) = G_CONSTANT i32 232
; CHECK: [[PTR_ADD60:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
; CHECK: G_STORE [[UV58]](p3), [[PTR_ADD60]](p5) :: (store 4 into %stack.0 + 232, align 256, addrspace 5)
; CHECK: G_STORE [[UV58]](p3), [[PTR_ADD60]](p5) :: (store 4 into %stack.0 + 232, align 8, basealign 256, addrspace 5)
; CHECK: [[C61:%[0-9]+]]:_(s32) = G_CONSTANT i32 236
; CHECK: [[PTR_ADD61:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
; CHECK: G_STORE [[UV59]](p3), [[PTR_ADD61]](p5) :: (store 4 into %stack.0 + 236, align 256, addrspace 5)
; CHECK: G_STORE [[UV59]](p3), [[PTR_ADD61]](p5) :: (store 4 into %stack.0 + 236, basealign 256, addrspace 5)
; CHECK: [[C62:%[0-9]+]]:_(s32) = G_CONSTANT i32 240
; CHECK: [[PTR_ADD62:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
; CHECK: G_STORE [[UV60]](p3), [[PTR_ADD62]](p5) :: (store 4 into %stack.0 + 240, align 256, addrspace 5)
; CHECK: G_STORE [[UV60]](p3), [[PTR_ADD62]](p5) :: (store 4 into %stack.0 + 240, align 16, basealign 256, addrspace 5)
; CHECK: [[C63:%[0-9]+]]:_(s32) = G_CONSTANT i32 244
; CHECK: [[PTR_ADD63:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
; CHECK: G_STORE [[UV61]](p3), [[PTR_ADD63]](p5) :: (store 4 into %stack.0 + 244, align 256, addrspace 5)
; CHECK: G_STORE [[UV61]](p3), [[PTR_ADD63]](p5) :: (store 4 into %stack.0 + 244, basealign 256, addrspace 5)
; CHECK: [[C64:%[0-9]+]]:_(s32) = G_CONSTANT i32 248
; CHECK: [[PTR_ADD64:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
; CHECK: G_STORE [[UV62]](p3), [[PTR_ADD64]](p5) :: (store 4 into %stack.0 + 248, align 256, addrspace 5)
; CHECK: G_STORE [[UV62]](p3), [[PTR_ADD64]](p5) :: (store 4 into %stack.0 + 248, align 8, basealign 256, addrspace 5)
; CHECK: [[C65:%[0-9]+]]:_(s32) = G_CONSTANT i32 252
; CHECK: [[PTR_ADD65:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
; CHECK: G_STORE [[UV63]](p3), [[PTR_ADD65]](p5) :: (store 4 into %stack.0 + 252, align 256, addrspace 5)
; CHECK: G_STORE [[UV63]](p3), [[PTR_ADD65]](p5) :: (store 4 into %stack.0 + 252, basealign 256, addrspace 5)
; CHECK: [[LOAD4:%[0-9]+]]:_(p3) = G_LOAD [[PTR_ADD35]](p5) :: (load 4 from %stack.0 + 132, addrspace 5)
; CHECK: S_ENDPGM 0, implicit [[LOAD4]](p3)
%0:_(p1) = COPY $sgpr0_sgpr1
@@ -1723,193 +1723,193 @@ body: |
; CHECK: G_STORE [[UV]](s32), [[FRAME_INDEX]](p5) :: (store 4 into %stack.0, align 256, addrspace 5)
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C3]](s32)
; CHECK: G_STORE [[UV1]](s32), [[PTR_ADD3]](p5) :: (store 4 into %stack.0 + 4, align 256, addrspace 5)
; CHECK: G_STORE [[UV1]](s32), [[PTR_ADD3]](p5) :: (store 4 into %stack.0 + 4, basealign 256, addrspace 5)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
; CHECK: G_STORE [[UV2]](s32), [[PTR_ADD4]](p5) :: (store 4 into %stack.0 + 8, align 256, addrspace 5)
; CHECK: G_STORE [[UV2]](s32), [[PTR_ADD4]](p5) :: (store 4 into %stack.0 + 8, align 8, basealign 256, addrspace 5)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
; CHECK: G_STORE [[UV3]](s32), [[PTR_ADD5]](p5) :: (store 4 into %stack.0 + 12, align 256, addrspace 5)
; CHECK: G_STORE [[UV3]](s32), [[PTR_ADD5]](p5) :: (store 4 into %stack.0 + 12, basealign 256, addrspace 5)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
; CHECK: G_STORE [[UV4]](s32), [[PTR_ADD6]](p5) :: (store 4 into %stack.0 + 16, align 256, addrspace 5)
; CHECK: G_STORE [[UV4]](s32), [[PTR_ADD6]](p5) :: (store 4 into %stack.0 + 16, align 16, basealign 256, addrspace 5)
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
; CHECK: G_STORE [[UV5]](s32), [[PTR_ADD7]](p5) :: (store 4 into %stack.0 + 20, align 256, addrspace 5)
; CHECK: G_STORE [[UV5]](s32), [[PTR_ADD7]](p5) :: (store 4 into %stack.0 + 20, basealign 256, addrspace 5)
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
; CHECK: G_STORE [[UV6]](s32), [[PTR_ADD8]](p5) :: (store 4 into %stack.0 + 24, align 256, addrspace 5)
; CHECK: G_STORE [[UV6]](s32), [[PTR_ADD8]](p5) :: (store 4 into %stack.0 + 24, align 8, basealign 256, addrspace 5)
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
; CHECK: G_STORE [[UV7]](s32), [[PTR_ADD9]](p5) :: (store 4 into %stack.0 + 28, align 256, addrspace 5)
; CHECK: G_STORE [[UV7]](s32), [[PTR_ADD9]](p5) :: (store 4 into %stack.0 + 28, basealign 256, addrspace 5)
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
; CHECK: G_STORE [[UV8]](s32), [[PTR_ADD10]](p5) :: (store 4 into %stack.0 + 32, align 256, addrspace 5)
; CHECK: G_STORE [[UV8]](s32), [[PTR_ADD10]](p5) :: (store 4 into %stack.0 + 32, align 32, basealign 256, addrspace 5)
; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
; CHECK: G_STORE [[UV9]](s32), [[PTR_ADD11]](p5) :: (store 4 into %stack.0 + 36, align 256, addrspace 5)
; CHECK: G_STORE [[UV9]](s32), [[PTR_ADD11]](p5) :: (store 4 into %stack.0 + 36, basealign 256, addrspace 5)
; CHECK: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
; CHECK: G_STORE [[UV10]](s32), [[PTR_ADD12]](p5) :: (store 4 into %stack.0 + 40, align 256, addrspace 5)
; CHECK: G_STORE [[UV10]](s32), [[PTR_ADD12]](p5) :: (store 4 into %stack.0 + 40, align 8, basealign 256, addrspace 5)
; CHECK: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
; CHECK: G_STORE [[UV11]](s32), [[PTR_ADD13]](p5) :: (store 4 into %stack.0 + 44, align 256, addrspace 5)
; CHECK: G_STORE [[UV11]](s32), [[PTR_ADD13]](p5) :: (store 4 into %stack.0 + 44, basealign 256, addrspace 5)
; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
; CHECK: G_STORE [[UV12]](s32), [[PTR_ADD14]](p5) :: (store 4 into %stack.0 + 48, align 256, addrspace 5)
; CHECK: G_STORE [[UV12]](s32), [[PTR_ADD14]](p5) :: (store 4 into %stack.0 + 48, align 16, basealign 256, addrspace 5)
; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
; CHECK: G_STORE [[UV13]](s32), [[PTR_ADD15]](p5) :: (store 4 into %stack.0 + 52, align 256, addrspace 5)
; CHECK: G_STORE [[UV13]](s32), [[PTR_ADD15]](p5) :: (store 4 into %stack.0 + 52, basealign 256, addrspace 5)
; CHECK: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
; CHECK: G_STORE [[UV14]](s32), [[PTR_ADD16]](p5) :: (store 4 into %stack.0 + 56, align 256, addrspace 5)
; CHECK: G_STORE [[UV14]](s32), [[PTR_ADD16]](p5) :: (store 4 into %stack.0 + 56, align 8, basealign 256, addrspace 5)
; CHECK: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
; CHECK: G_STORE [[UV15]](s32), [[PTR_ADD17]](p5) :: (store 4 into %stack.0 + 60, align 256, addrspace 5)
; CHECK: G_STORE [[UV15]](s32), [[PTR_ADD17]](p5) :: (store 4 into %stack.0 + 60, basealign 256, addrspace 5)
; CHECK: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
; CHECK: G_STORE [[UV16]](s32), [[PTR_ADD18]](p5) :: (store 4 into %stack.0 + 64, align 256, addrspace 5)
; CHECK: G_STORE [[UV16]](s32), [[PTR_ADD18]](p5) :: (store 4 into %stack.0 + 64, align 64, basealign 256, addrspace 5)
; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 68
; CHECK: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
; CHECK: G_STORE [[UV17]](s32), [[PTR_ADD19]](p5) :: (store 4 into %stack.0 + 68, align 256, addrspace 5)
; CHECK: G_STORE [[UV17]](s32), [[PTR_ADD19]](p5) :: (store 4 into %stack.0 + 68, basealign 256, addrspace 5)
; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 72
; CHECK: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
; CHECK: G_STORE [[UV18]](s32), [[PTR_ADD20]](p5) :: (store 4 into %stack.0 + 72, align 256, addrspace 5)
; CHECK: G_STORE [[UV18]](s32), [[PTR_ADD20]](p5) :: (store 4 into %stack.0 + 72, align 8, basealign 256, addrspace 5)
; CHECK: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
; CHECK: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
; CHECK: G_STORE [[UV19]](s32), [[PTR_ADD21]](p5) :: (store 4 into %stack.0 + 76, align 256, addrspace 5)
; CHECK: G_STORE [[UV19]](s32), [[PTR_ADD21]](p5) :: (store 4 into %stack.0 + 76, basealign 256, addrspace 5)
; CHECK: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 80
; CHECK: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
; CHECK: G_STORE [[UV20]](s32), [[PTR_ADD22]](p5) :: (store 4 into %stack.0 + 80, align 256, addrspace 5)
; CHECK: G_STORE [[UV20]](s32), [[PTR_ADD22]](p5) :: (store 4 into %stack.0 + 80, align 16, basealign 256, addrspace 5)
; CHECK: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 84
; CHECK: [[PTR_ADD23:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
; CHECK: G_STORE [[UV21]](s32), [[PTR_ADD23]](p5) :: (store 4 into %stack.0 + 84, align 256, addrspace 5)
; CHECK: G_STORE [[UV21]](s32), [[PTR_ADD23]](p5) :: (store 4 into %stack.0 + 84, basealign 256, addrspace 5)
; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 88
; CHECK: [[PTR_ADD24:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
; CHECK: G_STORE [[UV22]](s32), [[PTR_ADD24]](p5) :: (store 4 into %stack.0 + 88, align 256, addrspace 5)
; CHECK: G_STORE [[UV22]](s32), [[PTR_ADD24]](p5) :: (store 4 into %stack.0 + 88, align 8, basealign 256, addrspace 5)
; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 92
; CHECK: [[PTR_ADD25:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
; CHECK: G_STORE [[UV23]](s32), [[PTR_ADD25]](p5) :: (store 4 into %stack.0 + 92, align 256, addrspace 5)
; CHECK: G_STORE [[UV23]](s32), [[PTR_ADD25]](p5) :: (store 4 into %stack.0 + 92, basealign 256, addrspace 5)
; CHECK: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 96
; CHECK: [[PTR_ADD26:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
; CHECK: G_STORE [[UV24]](s32), [[PTR_ADD26]](p5) :: (store 4 into %stack.0 + 96, align 256, addrspace 5)
; CHECK: G_STORE [[UV24]](s32), [[PTR_ADD26]](p5) :: (store 4 into %stack.0 + 96, align 32, basealign 256, addrspace 5)
; CHECK: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; CHECK: [[PTR_ADD27:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
; CHECK: G_STORE [[UV25]](s32), [[PTR_ADD27]](p5) :: (store 4 into %stack.0 + 100, align 256, addrspace 5)
; CHECK: G_STORE [[UV25]](s32), [[PTR_ADD27]](p5) :: (store 4 into %stack.0 + 100, basealign 256, addrspace 5)
; CHECK: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
; CHECK: [[PTR_ADD28:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
; CHECK: G_STORE [[UV26]](s32), [[PTR_ADD28]](p5) :: (store 4 into %stack.0 + 104, align 256, addrspace 5)
; CHECK: G_STORE [[UV26]](s32), [[PTR_ADD28]](p5) :: (store 4 into %stack.0 + 104, align 8, basealign 256, addrspace 5)
; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 108
; CHECK: [[PTR_ADD29:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
; CHECK: G_STORE [[UV27]](s32), [[PTR_ADD29]](p5) :: (store 4 into %stack.0 + 108, align 256, addrspace 5)
; CHECK: G_STORE [[UV27]](s32), [[PTR_ADD29]](p5) :: (store 4 into %stack.0 + 108, basealign 256, addrspace 5)
; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 112
; CHECK: [[PTR_ADD30:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
; CHECK: G_STORE [[UV28]](s32), [[PTR_ADD30]](p5) :: (store 4 into %stack.0 + 112, align 256, addrspace 5)
; CHECK: G_STORE [[UV28]](s32), [[PTR_ADD30]](p5) :: (store 4 into %stack.0 + 112, align 16, basealign 256, addrspace 5)
; CHECK: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 116
; CHECK: [[PTR_ADD31:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
; CHECK: G_STORE [[UV29]](s32), [[PTR_ADD31]](p5) :: (store 4 into %stack.0 + 116, align 256, addrspace 5)
; CHECK: G_STORE [[UV29]](s32), [[PTR_ADD31]](p5) :: (store 4 into %stack.0 + 116, basealign 256, addrspace 5)
; CHECK: [[C32:%[0-9]+]]:_(s32) = G_CONSTANT i32 120
; CHECK: [[PTR_ADD32:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
; CHECK: G_STORE [[UV30]](s32), [[PTR_ADD32]](p5) :: (store 4 into %stack.0 + 120, align 256, addrspace 5)
; CHECK: G_STORE [[UV30]](s32), [[PTR_ADD32]](p5) :: (store 4 into %stack.0 + 120, align 8, basealign 256, addrspace 5)
; CHECK: [[C33:%[0-9]+]]:_(s32) = G_CONSTANT i32 124
; CHECK: [[PTR_ADD33:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
; CHECK: G_STORE [[UV31]](s32), [[PTR_ADD33]](p5) :: (store 4 into %stack.0 + 124, align 256, addrspace 5)
; CHECK: G_STORE [[UV31]](s32), [[PTR_ADD33]](p5) :: (store 4 into %stack.0 + 124, basealign 256, addrspace 5)
; CHECK: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
; CHECK: [[PTR_ADD34:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
; CHECK: G_STORE [[UV32]](s32), [[PTR_ADD34]](p5) :: (store 4 into %stack.0 + 128, align 256, addrspace 5)
; CHECK: G_STORE [[UV32]](s32), [[PTR_ADD34]](p5) :: (store 4 into %stack.0 + 128, align 128, basealign 256, addrspace 5)
; CHECK: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 132
; CHECK: [[PTR_ADD35:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
; CHECK: G_STORE [[UV33]](s32), [[PTR_ADD35]](p5) :: (store 4 into %stack.0 + 132, align 256, addrspace 5)
; CHECK: G_STORE [[UV33]](s32), [[PTR_ADD35]](p5) :: (store 4 into %stack.0 + 132, basealign 256, addrspace 5)
; CHECK: [[C36:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
; CHECK: [[PTR_ADD36:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
; CHECK: G_STORE [[UV34]](s32), [[PTR_ADD36]](p5) :: (store 4 into %stack.0 + 136, align 256, addrspace 5)
; CHECK: G_STORE [[UV34]](s32), [[PTR_ADD36]](p5) :: (store 4 into %stack.0 + 136, align 8, basealign 256, addrspace 5)
; CHECK: [[C37:%[0-9]+]]:_(s32) = G_CONSTANT i32 140
; CHECK: [[PTR_ADD37:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
; CHECK: G_STORE [[UV35]](s32), [[PTR_ADD37]](p5) :: (store 4 into %stack.0 + 140, align 256, addrspace 5)
; CHECK: G_STORE [[UV35]](s32), [[PTR_ADD37]](p5) :: (store 4 into %stack.0 + 140, basealign 256, addrspace 5)
; CHECK: [[C38:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
; CHECK: [[PTR_ADD38:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
; CHECK: G_STORE [[UV36]](s32), [[PTR_ADD38]](p5) :: (store 4 into %stack.0 + 144, align 256, addrspace 5)
; CHECK: G_STORE [[UV36]](s32), [[PTR_ADD38]](p5) :: (store 4 into %stack.0 + 144, align 16, basealign 256, addrspace 5)
; CHECK: [[C39:%[0-9]+]]:_(s32) = G_CONSTANT i32 148
; CHECK: [[PTR_ADD39:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
; CHECK: G_STORE [[UV37]](s32), [[PTR_ADD39]](p5) :: (store 4 into %stack.0 + 148, align 256, addrspace 5)
; CHECK: G_STORE [[UV37]](s32), [[PTR_ADD39]](p5) :: (store 4 into %stack.0 + 148, basealign 256, addrspace 5)
; CHECK: [[C40:%[0-9]+]]:_(s32) = G_CONSTANT i32 152
; CHECK: [[PTR_ADD40:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
; CHECK: G_STORE [[UV38]](s32), [[PTR_ADD40]](p5) :: (store 4 into %stack.0 + 152, align 256, addrspace 5)
; CHECK: G_STORE [[UV38]](s32), [[PTR_ADD40]](p5) :: (store 4 into %stack.0 + 152, align 8, basealign 256, addrspace 5)
; CHECK: [[C41:%[0-9]+]]:_(s32) = G_CONSTANT i32 156
; CHECK: [[PTR_ADD41:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
; CHECK: G_STORE [[UV39]](s32), [[PTR_ADD41]](p5) :: (store 4 into %stack.0 + 156, align 256, addrspace 5)
; CHECK: G_STORE [[UV39]](s32), [[PTR_ADD41]](p5) :: (store 4 into %stack.0 + 156, basealign 256, addrspace 5)
; CHECK: [[C42:%[0-9]+]]:_(s32) = G_CONSTANT i32 160
; CHECK: [[PTR_ADD42:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
; CHECK: G_STORE [[UV40]](s32), [[PTR_ADD42]](p5) :: (store 4 into %stack.0 + 160, align 256, addrspace 5)
; CHECK: G_STORE [[UV40]](s32), [[PTR_ADD42]](p5) :: (store 4 into %stack.0 + 160, align 32, basealign 256, addrspace 5)
; CHECK: [[C43:%[0-9]+]]:_(s32) = G_CONSTANT i32 164
; CHECK: [[PTR_ADD43:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
; CHECK: G_STORE [[UV41]](s32), [[PTR_ADD43]](p5) :: (store 4 into %stack.0 + 164, align 256, addrspace 5)
; CHECK: G_STORE [[UV41]](s32), [[PTR_ADD43]](p5) :: (store 4 into %stack.0 + 164, basealign 256, addrspace 5)
; CHECK: [[C44:%[0-9]+]]:_(s32) = G_CONSTANT i32 168
; CHECK: [[PTR_ADD44:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
; CHECK: G_STORE [[UV42]](s32), [[PTR_ADD44]](p5) :: (store 4 into %stack.0 + 168, align 256, addrspace 5)
; CHECK: G_STORE [[UV42]](s32), [[PTR_ADD44]](p5) :: (store 4 into %stack.0 + 168, align 8, basealign 256, addrspace 5)
; CHECK: [[C45:%[0-9]+]]:_(s32) = G_CONSTANT i32 172
; CHECK: [[PTR_ADD45:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
; CHECK: G_STORE [[UV43]](s32), [[PTR_ADD45]](p5) :: (store 4 into %stack.0 + 172, align 256, addrspace 5)
; CHECK: G_STORE [[UV43]](s32), [[PTR_ADD45]](p5) :: (store 4 into %stack.0 + 172, basealign 256, addrspace 5)
; CHECK: [[C46:%[0-9]+]]:_(s32) = G_CONSTANT i32 176
; CHECK: [[PTR_ADD46:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
; CHECK: G_STORE [[UV44]](s32), [[PTR_ADD46]](p5) :: (store 4 into %stack.0 + 176, align 256, addrspace 5)
; CHECK: G_STORE [[UV44]](s32), [[PTR_ADD46]](p5) :: (store 4 into %stack.0 + 176, align 16, basealign 256, addrspace 5)
; CHECK: [[C47:%[0-9]+]]:_(s32) = G_CONSTANT i32 180
; CHECK: [[PTR_ADD47:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
; CHECK: G_STORE [[UV45]](s32), [[PTR_ADD47]](p5) :: (store 4 into %stack.0 + 180, align 256, addrspace 5)
; CHECK: G_STORE [[UV45]](s32), [[PTR_ADD47]](p5) :: (store 4 into %stack.0 + 180, basealign 256, addrspace 5)
; CHECK: [[C48:%[0-9]+]]:_(s32) = G_CONSTANT i32 184
; CHECK: [[PTR_ADD48:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
; CHECK: G_STORE [[UV46]](s32), [[PTR_ADD48]](p5) :: (store 4 into %stack.0 + 184, align 256, addrspace 5)
; CHECK: G_STORE [[UV46]](s32), [[PTR_ADD48]](p5) :: (store 4 into %stack.0 + 184, align 8, basealign 256, addrspace 5)
; CHECK: [[C49:%[0-9]+]]:_(s32) = G_CONSTANT i32 188
; CHECK: [[PTR_ADD49:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
; CHECK: G_STORE [[UV47]](s32), [[PTR_ADD49]](p5) :: (store 4 into %stack.0 + 188, align 256, addrspace 5)
; CHECK: G_STORE [[UV47]](s32), [[PTR_ADD49]](p5) :: (store 4 into %stack.0 + 188, basealign 256, addrspace 5)
; CHECK: [[C50:%[0-9]+]]:_(s32) = G_CONSTANT i32 192
; CHECK: [[PTR_ADD50:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
; CHECK: G_STORE [[UV48]](s32), [[PTR_ADD50]](p5) :: (store 4 into %stack.0 + 192, align 256, addrspace 5)
; CHECK: G_STORE [[UV48]](s32), [[PTR_ADD50]](p5) :: (store 4 into %stack.0 + 192, align 64, basealign 256, addrspace 5)
; CHECK: [[C51:%[0-9]+]]:_(s32) = G_CONSTANT i32 196
; CHECK: [[PTR_ADD51:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
; CHECK: G_STORE [[UV49]](s32), [[PTR_ADD51]](p5) :: (store 4 into %stack.0 + 196, align 256, addrspace 5)
; CHECK: G_STORE [[UV49]](s32), [[PTR_ADD51]](p5) :: (store 4 into %stack.0 + 196, basealign 256, addrspace 5)
; CHECK: [[C52:%[0-9]+]]:_(s32) = G_CONSTANT i32 200
; CHECK: [[PTR_ADD52:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
; CHECK: G_STORE [[UV50]](s32), [[PTR_ADD52]](p5) :: (store 4 into %stack.0 + 200, align 256, addrspace 5)
; CHECK: G_STORE [[UV50]](s32), [[PTR_ADD52]](p5) :: (store 4 into %stack.0 + 200, align 8, basealign 256, addrspace 5)
; CHECK: [[C53:%[0-9]+]]:_(s32) = G_CONSTANT i32 204
; CHECK: [[PTR_ADD53:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
; CHECK: G_STORE [[UV51]](s32), [[PTR_ADD53]](p5) :: (store 4 into %stack.0 + 204, align 256, addrspace 5)
; CHECK: G_STORE [[UV51]](s32), [[PTR_ADD53]](p5) :: (store 4 into %stack.0 + 204, basealign 256, addrspace 5)
; CHECK: [[C54:%[0-9]+]]:_(s32) = G_CONSTANT i32 208
; CHECK: [[PTR_ADD54:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
; CHECK: G_STORE [[UV52]](s32), [[PTR_ADD54]](p5) :: (store 4 into %stack.0 + 208, align 256, addrspace 5)
; CHECK: G_STORE [[UV52]](s32), [[PTR_ADD54]](p5) :: (store 4 into %stack.0 + 208, align 16, basealign 256, addrspace 5)
; CHECK: [[C55:%[0-9]+]]:_(s32) = G_CONSTANT i32 212
; CHECK: [[PTR_ADD55:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
; CHECK: G_STORE [[UV53]](s32), [[PTR_ADD55]](p5) :: (store 4 into %stack.0 + 212, align 256, addrspace 5)
; CHECK: G_STORE [[UV53]](s32), [[PTR_ADD55]](p5) :: (store 4 into %stack.0 + 212, basealign 256, addrspace 5)
; CHECK: [[C56:%[0-9]+]]:_(s32) = G_CONSTANT i32 216
; CHECK: [[PTR_ADD56:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
; CHECK: G_STORE [[UV54]](s32), [[PTR_ADD56]](p5) :: (store 4 into %stack.0 + 216, align 256, addrspace 5)
; CHECK: G_STORE [[UV54]](s32), [[PTR_ADD56]](p5) :: (store 4 into %stack.0 + 216, align 8, basealign 256, addrspace 5)
; CHECK: [[C57:%[0-9]+]]:_(s32) = G_CONSTANT i32 220
; CHECK: [[PTR_ADD57:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
; CHECK: G_STORE [[UV55]](s32), [[PTR_ADD57]](p5) :: (store 4 into %stack.0 + 220, align 256, addrspace 5)
; CHECK: G_STORE [[UV55]](s32), [[PTR_ADD57]](p5) :: (store 4 into %stack.0 + 220, basealign 256, addrspace 5)
; CHECK: [[C58:%[0-9]+]]:_(s32) = G_CONSTANT i32 224
; CHECK: [[PTR_ADD58:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
; CHECK: G_STORE [[UV56]](s32), [[PTR_ADD58]](p5) :: (store 4 into %stack.0 + 224, align 256, addrspace 5)
; CHECK: G_STORE [[UV56]](s32), [[PTR_ADD58]](p5) :: (store 4 into %stack.0 + 224, align 32, basealign 256, addrspace 5)
; CHECK: [[C59:%[0-9]+]]:_(s32) = G_CONSTANT i32 228
; CHECK: [[PTR_ADD59:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
; CHECK: G_STORE [[UV57]](s32), [[PTR_ADD59]](p5) :: (store 4 into %stack.0 + 228, align 256, addrspace 5)
; CHECK: G_STORE [[UV57]](s32), [[PTR_ADD59]](p5) :: (store 4 into %stack.0 + 228, basealign 256, addrspace 5)
; CHECK: [[C60:%[0-9]+]]:_(s32) = G_CONSTANT i32 232
; CHECK: [[PTR_ADD60:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
; CHECK: G_STORE [[UV58]](s32), [[PTR_ADD60]](p5) :: (store 4 into %stack.0 + 232, align 256, addrspace 5)
; CHECK: G_STORE [[UV58]](s32), [[PTR_ADD60]](p5) :: (store 4 into %stack.0 + 232, align 8, basealign 256, addrspace 5)
; CHECK: [[C61:%[0-9]+]]:_(s32) = G_CONSTANT i32 236
; CHECK: [[PTR_ADD61:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
; CHECK: G_STORE [[UV59]](s32), [[PTR_ADD61]](p5) :: (store 4 into %stack.0 + 236, align 256, addrspace 5)
; CHECK: G_STORE [[UV59]](s32), [[PTR_ADD61]](p5) :: (store 4 into %stack.0 + 236, basealign 256, addrspace 5)
; CHECK: [[C62:%[0-9]+]]:_(s32) = G_CONSTANT i32 240
; CHECK: [[PTR_ADD62:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
; CHECK: G_STORE [[UV60]](s32), [[PTR_ADD62]](p5) :: (store 4 into %stack.0 + 240, align 256, addrspace 5)
; CHECK: G_STORE [[UV60]](s32), [[PTR_ADD62]](p5) :: (store 4 into %stack.0 + 240, align 16, basealign 256, addrspace 5)
; CHECK: [[C63:%[0-9]+]]:_(s32) = G_CONSTANT i32 244
; CHECK: [[PTR_ADD63:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
; CHECK: G_STORE [[UV61]](s32), [[PTR_ADD63]](p5) :: (store 4 into %stack.0 + 244, align 256, addrspace 5)
; CHECK: G_STORE [[UV61]](s32), [[PTR_ADD63]](p5) :: (store 4 into %stack.0 + 244, basealign 256, addrspace 5)
; CHECK: [[C64:%[0-9]+]]:_(s32) = G_CONSTANT i32 248
; CHECK: [[PTR_ADD64:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
; CHECK: G_STORE [[UV62]](s32), [[PTR_ADD64]](p5) :: (store 4 into %stack.0 + 248, align 256, addrspace 5)
; CHECK: G_STORE [[UV62]](s32), [[PTR_ADD64]](p5) :: (store 4 into %stack.0 + 248, align 8, basealign 256, addrspace 5)
; CHECK: [[C65:%[0-9]+]]:_(s32) = G_CONSTANT i32 252
; CHECK: [[PTR_ADD65:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
; CHECK: G_STORE [[UV63]](s32), [[PTR_ADD65]](p5) :: (store 4 into %stack.0 + 252, align 256, addrspace 5)
; CHECK: G_STORE [[UV63]](s32), [[PTR_ADD65]](p5) :: (store 4 into %stack.0 + 252, basealign 256, addrspace 5)
; CHECK: [[C66:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C66]]
; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[C3]]
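
In this test the stack slot has base alignment 256 and every store is 4
bytes wide, so the printed alignment now tracks the offset: offsets such
as +4, +12, and +20 yield an effective alignment equal to the 4-byte
store size and print only basealign 256, while offsets +8, +16, +32,
+64, and +128 print align 8, 16, 32, 64, and 128 alongside the base
alignment.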


@@ -399,255 +399,255 @@ body: |
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C4]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(p5) = COPY [[PTR_ADD3]](p5)
; CHECK: G_STORE [[UV1]](s32), [[COPY2]](p5) :: (store 4 into %stack.0 + 4, align 256, addrspace 5)
; CHECK: G_STORE [[UV1]](s32), [[COPY2]](p5) :: (store 4 into %stack.0 + 4, basealign 256, addrspace 5)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C5]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(p5) = COPY [[PTR_ADD4]](p5)
; CHECK: G_STORE [[UV2]](s32), [[COPY3]](p5) :: (store 4 into %stack.0 + 8, align 256, addrspace 5)
; CHECK: G_STORE [[UV2]](s32), [[COPY3]](p5) :: (store 4 into %stack.0 + 8, align 8, basealign 256, addrspace 5)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C6]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(p5) = COPY [[PTR_ADD5]](p5)
; CHECK: G_STORE [[UV3]](s32), [[COPY4]](p5) :: (store 4 into %stack.0 + 12, align 256, addrspace 5)
; CHECK: G_STORE [[UV3]](s32), [[COPY4]](p5) :: (store 4 into %stack.0 + 12, basealign 256, addrspace 5)
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C7]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(p5) = COPY [[PTR_ADD6]](p5)
; CHECK: G_STORE [[UV4]](s32), [[COPY5]](p5) :: (store 4 into %stack.0 + 16, align 256, addrspace 5)
; CHECK: G_STORE [[UV4]](s32), [[COPY5]](p5) :: (store 4 into %stack.0 + 16, align 16, basealign 256, addrspace 5)
; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C8]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(p5) = COPY [[PTR_ADD7]](p5)
; CHECK: G_STORE [[UV5]](s32), [[COPY6]](p5) :: (store 4 into %stack.0 + 20, align 256, addrspace 5)
; CHECK: G_STORE [[UV5]](s32), [[COPY6]](p5) :: (store 4 into %stack.0 + 20, basealign 256, addrspace 5)
; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C9]](s32)
; CHECK: [[COPY7:%[0-9]+]]:_(p5) = COPY [[PTR_ADD8]](p5)
; CHECK: G_STORE [[UV6]](s32), [[COPY7]](p5) :: (store 4 into %stack.0 + 24, align 256, addrspace 5)
; CHECK: G_STORE [[UV6]](s32), [[COPY7]](p5) :: (store 4 into %stack.0 + 24, align 8, basealign 256, addrspace 5)
; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C10]](s32)
; CHECK: [[COPY8:%[0-9]+]]:_(p5) = COPY [[PTR_ADD9]](p5)
; CHECK: G_STORE [[UV7]](s32), [[COPY8]](p5) :: (store 4 into %stack.0 + 28, align 256, addrspace 5)
; CHECK: G_STORE [[UV7]](s32), [[COPY8]](p5) :: (store 4 into %stack.0 + 28, basealign 256, addrspace 5)
; CHECK: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C11]](s32)
; CHECK: [[COPY9:%[0-9]+]]:_(p5) = COPY [[PTR_ADD10]](p5)
; CHECK: G_STORE [[UV8]](s32), [[COPY9]](p5) :: (store 4 into %stack.0 + 32, align 256, addrspace 5)
; CHECK: G_STORE [[UV8]](s32), [[COPY9]](p5) :: (store 4 into %stack.0 + 32, align 32, basealign 256, addrspace 5)
; CHECK: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 36
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C12]](s32)
; CHECK: [[COPY10:%[0-9]+]]:_(p5) = COPY [[PTR_ADD11]](p5)
; CHECK: G_STORE [[UV9]](s32), [[COPY10]](p5) :: (store 4 into %stack.0 + 36, align 256, addrspace 5)
; CHECK: G_STORE [[UV9]](s32), [[COPY10]](p5) :: (store 4 into %stack.0 + 36, basealign 256, addrspace 5)
; CHECK: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C13]](s32)
; CHECK: [[COPY11:%[0-9]+]]:_(p5) = COPY [[PTR_ADD12]](p5)
; CHECK: G_STORE [[UV10]](s32), [[COPY11]](p5) :: (store 4 into %stack.0 + 40, align 256, addrspace 5)
; CHECK: G_STORE [[UV10]](s32), [[COPY11]](p5) :: (store 4 into %stack.0 + 40, align 8, basealign 256, addrspace 5)
; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C14]](s32)
; CHECK: [[COPY12:%[0-9]+]]:_(p5) = COPY [[PTR_ADD13]](p5)
; CHECK: G_STORE [[UV11]](s32), [[COPY12]](p5) :: (store 4 into %stack.0 + 44, align 256, addrspace 5)
; CHECK: G_STORE [[UV11]](s32), [[COPY12]](p5) :: (store 4 into %stack.0 + 44, basealign 256, addrspace 5)
; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 48
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C15]](s32)
; CHECK: [[COPY13:%[0-9]+]]:_(p5) = COPY [[PTR_ADD14]](p5)
; CHECK: G_STORE [[UV12]](s32), [[COPY13]](p5) :: (store 4 into %stack.0 + 48, align 256, addrspace 5)
; CHECK: G_STORE [[UV12]](s32), [[COPY13]](p5) :: (store 4 into %stack.0 + 48, align 16, basealign 256, addrspace 5)
; CHECK: [[C16:%[0-9]+]]:_(s32) = G_CONSTANT i32 52
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C16]](s32)
; CHECK: [[COPY14:%[0-9]+]]:_(p5) = COPY [[PTR_ADD15]](p5)
; CHECK: G_STORE [[UV13]](s32), [[COPY14]](p5) :: (store 4 into %stack.0 + 52, align 256, addrspace 5)
; CHECK: G_STORE [[UV13]](s32), [[COPY14]](p5) :: (store 4 into %stack.0 + 52, basealign 256, addrspace 5)
; CHECK: [[C17:%[0-9]+]]:_(s32) = G_CONSTANT i32 56
; CHECK: [[PTR_ADD16:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C17]](s32)
; CHECK: [[COPY15:%[0-9]+]]:_(p5) = COPY [[PTR_ADD16]](p5)
; CHECK: G_STORE [[UV14]](s32), [[COPY15]](p5) :: (store 4 into %stack.0 + 56, align 256, addrspace 5)
; CHECK: G_STORE [[UV14]](s32), [[COPY15]](p5) :: (store 4 into %stack.0 + 56, align 8, basealign 256, addrspace 5)
; CHECK: [[C18:%[0-9]+]]:_(s32) = G_CONSTANT i32 60
; CHECK: [[PTR_ADD17:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C18]](s32)
; CHECK: [[COPY16:%[0-9]+]]:_(p5) = COPY [[PTR_ADD17]](p5)
; CHECK: G_STORE [[UV15]](s32), [[COPY16]](p5) :: (store 4 into %stack.0 + 60, align 256, addrspace 5)
; CHECK: G_STORE [[UV15]](s32), [[COPY16]](p5) :: (store 4 into %stack.0 + 60, basealign 256, addrspace 5)
; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; CHECK: [[PTR_ADD18:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C19]](s32)
; CHECK: [[COPY17:%[0-9]+]]:_(p5) = COPY [[PTR_ADD18]](p5)
; CHECK: G_STORE [[UV16]](s32), [[COPY17]](p5) :: (store 4 into %stack.0 + 64, align 256, addrspace 5)
; CHECK: G_STORE [[UV16]](s32), [[COPY17]](p5) :: (store 4 into %stack.0 + 64, align 64, basealign 256, addrspace 5)
; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 68
; CHECK: [[PTR_ADD19:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C20]](s32)
; CHECK: [[COPY18:%[0-9]+]]:_(p5) = COPY [[PTR_ADD19]](p5)
; CHECK: G_STORE [[UV17]](s32), [[COPY18]](p5) :: (store 4 into %stack.0 + 68, align 256, addrspace 5)
; CHECK: G_STORE [[UV17]](s32), [[COPY18]](p5) :: (store 4 into %stack.0 + 68, basealign 256, addrspace 5)
; CHECK: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 72
; CHECK: [[PTR_ADD20:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C21]](s32)
; CHECK: [[COPY19:%[0-9]+]]:_(p5) = COPY [[PTR_ADD20]](p5)
; CHECK: G_STORE [[UV18]](s32), [[COPY19]](p5) :: (store 4 into %stack.0 + 72, align 256, addrspace 5)
; CHECK: G_STORE [[UV18]](s32), [[COPY19]](p5) :: (store 4 into %stack.0 + 72, align 8, basealign 256, addrspace 5)
; CHECK: [[C22:%[0-9]+]]:_(s32) = G_CONSTANT i32 76
; CHECK: [[PTR_ADD21:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C22]](s32)
; CHECK: [[COPY20:%[0-9]+]]:_(p5) = COPY [[PTR_ADD21]](p5)
; CHECK: G_STORE [[UV19]](s32), [[COPY20]](p5) :: (store 4 into %stack.0 + 76, align 256, addrspace 5)
; CHECK: G_STORE [[UV19]](s32), [[COPY20]](p5) :: (store 4 into %stack.0 + 76, basealign 256, addrspace 5)
; CHECK: [[C23:%[0-9]+]]:_(s32) = G_CONSTANT i32 80
; CHECK: [[PTR_ADD22:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C23]](s32)
; CHECK: [[COPY21:%[0-9]+]]:_(p5) = COPY [[PTR_ADD22]](p5)
; CHECK: G_STORE [[UV20]](s32), [[COPY21]](p5) :: (store 4 into %stack.0 + 80, align 256, addrspace 5)
; CHECK: G_STORE [[UV20]](s32), [[COPY21]](p5) :: (store 4 into %stack.0 + 80, align 16, basealign 256, addrspace 5)
; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 84
; CHECK: [[PTR_ADD23:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C24]](s32)
; CHECK: [[COPY22:%[0-9]+]]:_(p5) = COPY [[PTR_ADD23]](p5)
; CHECK: G_STORE [[UV21]](s32), [[COPY22]](p5) :: (store 4 into %stack.0 + 84, align 256, addrspace 5)
; CHECK: G_STORE [[UV21]](s32), [[COPY22]](p5) :: (store 4 into %stack.0 + 84, basealign 256, addrspace 5)
; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 88
; CHECK: [[PTR_ADD24:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C25]](s32)
; CHECK: [[COPY23:%[0-9]+]]:_(p5) = COPY [[PTR_ADD24]](p5)
; CHECK: G_STORE [[UV22]](s32), [[COPY23]](p5) :: (store 4 into %stack.0 + 88, align 256, addrspace 5)
; CHECK: G_STORE [[UV22]](s32), [[COPY23]](p5) :: (store 4 into %stack.0 + 88, align 8, basealign 256, addrspace 5)
; CHECK: [[C26:%[0-9]+]]:_(s32) = G_CONSTANT i32 92
; CHECK: [[PTR_ADD25:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C26]](s32)
; CHECK: [[COPY24:%[0-9]+]]:_(p5) = COPY [[PTR_ADD25]](p5)
; CHECK: G_STORE [[UV23]](s32), [[COPY24]](p5) :: (store 4 into %stack.0 + 92, align 256, addrspace 5)
; CHECK: G_STORE [[UV23]](s32), [[COPY24]](p5) :: (store 4 into %stack.0 + 92, basealign 256, addrspace 5)
; CHECK: [[C27:%[0-9]+]]:_(s32) = G_CONSTANT i32 96
; CHECK: [[PTR_ADD26:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C27]](s32)
; CHECK: [[COPY25:%[0-9]+]]:_(p5) = COPY [[PTR_ADD26]](p5)
; CHECK: G_STORE [[UV24]](s32), [[COPY25]](p5) :: (store 4 into %stack.0 + 96, align 256, addrspace 5)
; CHECK: G_STORE [[UV24]](s32), [[COPY25]](p5) :: (store 4 into %stack.0 + 96, align 32, basealign 256, addrspace 5)
; CHECK: [[C28:%[0-9]+]]:_(s32) = G_CONSTANT i32 100
; CHECK: [[PTR_ADD27:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C28]](s32)
; CHECK: [[COPY26:%[0-9]+]]:_(p5) = COPY [[PTR_ADD27]](p5)
; CHECK: G_STORE [[UV25]](s32), [[COPY26]](p5) :: (store 4 into %stack.0 + 100, align 256, addrspace 5)
; CHECK: G_STORE [[UV25]](s32), [[COPY26]](p5) :: (store 4 into %stack.0 + 100, basealign 256, addrspace 5)
; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 104
; CHECK: [[PTR_ADD28:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C29]](s32)
; CHECK: [[COPY27:%[0-9]+]]:_(p5) = COPY [[PTR_ADD28]](p5)
; CHECK: G_STORE [[UV26]](s32), [[COPY27]](p5) :: (store 4 into %stack.0 + 104, align 256, addrspace 5)
; CHECK: G_STORE [[UV26]](s32), [[COPY27]](p5) :: (store 4 into %stack.0 + 104, align 8, basealign 256, addrspace 5)
; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 108
; CHECK: [[PTR_ADD29:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C30]](s32)
; CHECK: [[COPY28:%[0-9]+]]:_(p5) = COPY [[PTR_ADD29]](p5)
; CHECK: G_STORE [[UV27]](s32), [[COPY28]](p5) :: (store 4 into %stack.0 + 108, align 256, addrspace 5)
; CHECK: G_STORE [[UV27]](s32), [[COPY28]](p5) :: (store 4 into %stack.0 + 108, basealign 256, addrspace 5)
; CHECK: [[C31:%[0-9]+]]:_(s32) = G_CONSTANT i32 112
; CHECK: [[PTR_ADD30:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C31]](s32)
; CHECK: [[COPY29:%[0-9]+]]:_(p5) = COPY [[PTR_ADD30]](p5)
; CHECK: G_STORE [[UV28]](s32), [[COPY29]](p5) :: (store 4 into %stack.0 + 112, align 256, addrspace 5)
; CHECK: G_STORE [[UV28]](s32), [[COPY29]](p5) :: (store 4 into %stack.0 + 112, align 16, basealign 256, addrspace 5)
; CHECK: [[C32:%[0-9]+]]:_(s32) = G_CONSTANT i32 116
; CHECK: [[PTR_ADD31:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C32]](s32)
; CHECK: [[COPY30:%[0-9]+]]:_(p5) = COPY [[PTR_ADD31]](p5)
; CHECK: G_STORE [[UV29]](s32), [[COPY30]](p5) :: (store 4 into %stack.0 + 116, align 256, addrspace 5)
; CHECK: G_STORE [[UV29]](s32), [[COPY30]](p5) :: (store 4 into %stack.0 + 116, basealign 256, addrspace 5)
; CHECK: [[C33:%[0-9]+]]:_(s32) = G_CONSTANT i32 120
; CHECK: [[PTR_ADD32:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C33]](s32)
; CHECK: [[COPY31:%[0-9]+]]:_(p5) = COPY [[PTR_ADD32]](p5)
; CHECK: G_STORE [[UV30]](s32), [[COPY31]](p5) :: (store 4 into %stack.0 + 120, align 256, addrspace 5)
; CHECK: G_STORE [[UV30]](s32), [[COPY31]](p5) :: (store 4 into %stack.0 + 120, align 8, basealign 256, addrspace 5)
; CHECK: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 124
; CHECK: [[PTR_ADD33:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C34]](s32)
; CHECK: [[COPY32:%[0-9]+]]:_(p5) = COPY [[PTR_ADD33]](p5)
; CHECK: G_STORE [[UV31]](s32), [[COPY32]](p5) :: (store 4 into %stack.0 + 124, align 256, addrspace 5)
; CHECK: G_STORE [[UV31]](s32), [[COPY32]](p5) :: (store 4 into %stack.0 + 124, basealign 256, addrspace 5)
; CHECK: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
; CHECK: [[PTR_ADD34:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C35]](s32)
; CHECK: [[COPY33:%[0-9]+]]:_(p5) = COPY [[PTR_ADD34]](p5)
; CHECK: G_STORE [[UV32]](s32), [[COPY33]](p5) :: (store 4 into %stack.0 + 128, align 256, addrspace 5)
; CHECK: G_STORE [[UV32]](s32), [[COPY33]](p5) :: (store 4 into %stack.0 + 128, align 128, basealign 256, addrspace 5)
; CHECK: [[C36:%[0-9]+]]:_(s32) = G_CONSTANT i32 132
; CHECK: [[PTR_ADD35:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C36]](s32)
; CHECK: [[COPY34:%[0-9]+]]:_(p5) = COPY [[PTR_ADD35]](p5)
; CHECK: G_STORE [[UV33]](s32), [[COPY34]](p5) :: (store 4 into %stack.0 + 132, align 256, addrspace 5)
; CHECK: G_STORE [[UV33]](s32), [[COPY34]](p5) :: (store 4 into %stack.0 + 132, basealign 256, addrspace 5)
; CHECK: [[C37:%[0-9]+]]:_(s32) = G_CONSTANT i32 136
; CHECK: [[PTR_ADD36:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C37]](s32)
; CHECK: [[COPY35:%[0-9]+]]:_(p5) = COPY [[PTR_ADD36]](p5)
; CHECK: G_STORE [[UV34]](s32), [[COPY35]](p5) :: (store 4 into %stack.0 + 136, align 256, addrspace 5)
; CHECK: G_STORE [[UV34]](s32), [[COPY35]](p5) :: (store 4 into %stack.0 + 136, align 8, basealign 256, addrspace 5)
; CHECK: [[C38:%[0-9]+]]:_(s32) = G_CONSTANT i32 140
; CHECK: [[PTR_ADD37:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C38]](s32)
; CHECK: [[COPY36:%[0-9]+]]:_(p5) = COPY [[PTR_ADD37]](p5)
; CHECK: G_STORE [[UV35]](s32), [[COPY36]](p5) :: (store 4 into %stack.0 + 140, align 256, addrspace 5)
; CHECK: G_STORE [[UV35]](s32), [[COPY36]](p5) :: (store 4 into %stack.0 + 140, basealign 256, addrspace 5)
; CHECK: [[C39:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
; CHECK: [[PTR_ADD38:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C39]](s32)
; CHECK: [[COPY37:%[0-9]+]]:_(p5) = COPY [[PTR_ADD38]](p5)
; CHECK: G_STORE [[UV36]](s32), [[COPY37]](p5) :: (store 4 into %stack.0 + 144, align 256, addrspace 5)
; CHECK: G_STORE [[UV36]](s32), [[COPY37]](p5) :: (store 4 into %stack.0 + 144, align 16, basealign 256, addrspace 5)
; CHECK: [[C40:%[0-9]+]]:_(s32) = G_CONSTANT i32 148
; CHECK: [[PTR_ADD39:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C40]](s32)
; CHECK: [[COPY38:%[0-9]+]]:_(p5) = COPY [[PTR_ADD39]](p5)
; CHECK: G_STORE [[UV37]](s32), [[COPY38]](p5) :: (store 4 into %stack.0 + 148, align 256, addrspace 5)
; CHECK: G_STORE [[UV37]](s32), [[COPY38]](p5) :: (store 4 into %stack.0 + 148, basealign 256, addrspace 5)
; CHECK: [[C41:%[0-9]+]]:_(s32) = G_CONSTANT i32 152
; CHECK: [[PTR_ADD40:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C41]](s32)
; CHECK: [[COPY39:%[0-9]+]]:_(p5) = COPY [[PTR_ADD40]](p5)
; CHECK: G_STORE [[UV38]](s32), [[COPY39]](p5) :: (store 4 into %stack.0 + 152, align 256, addrspace 5)
; CHECK: G_STORE [[UV38]](s32), [[COPY39]](p5) :: (store 4 into %stack.0 + 152, align 8, basealign 256, addrspace 5)
; CHECK: [[C42:%[0-9]+]]:_(s32) = G_CONSTANT i32 156
; CHECK: [[PTR_ADD41:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C42]](s32)
; CHECK: [[COPY40:%[0-9]+]]:_(p5) = COPY [[PTR_ADD41]](p5)
; CHECK: G_STORE [[UV39]](s32), [[COPY40]](p5) :: (store 4 into %stack.0 + 156, align 256, addrspace 5)
; CHECK: G_STORE [[UV39]](s32), [[COPY40]](p5) :: (store 4 into %stack.0 + 156, basealign 256, addrspace 5)
; CHECK: [[C43:%[0-9]+]]:_(s32) = G_CONSTANT i32 160
; CHECK: [[PTR_ADD42:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C43]](s32)
; CHECK: [[COPY41:%[0-9]+]]:_(p5) = COPY [[PTR_ADD42]](p5)
; CHECK: G_STORE [[UV40]](s32), [[COPY41]](p5) :: (store 4 into %stack.0 + 160, align 256, addrspace 5)
; CHECK: G_STORE [[UV40]](s32), [[COPY41]](p5) :: (store 4 into %stack.0 + 160, align 32, basealign 256, addrspace 5)
; CHECK: [[C44:%[0-9]+]]:_(s32) = G_CONSTANT i32 164
; CHECK: [[PTR_ADD43:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C44]](s32)
; CHECK: [[COPY42:%[0-9]+]]:_(p5) = COPY [[PTR_ADD43]](p5)
; CHECK: G_STORE [[UV41]](s32), [[COPY42]](p5) :: (store 4 into %stack.0 + 164, align 256, addrspace 5)
; CHECK: G_STORE [[UV41]](s32), [[COPY42]](p5) :: (store 4 into %stack.0 + 164, basealign 256, addrspace 5)
; CHECK: [[C45:%[0-9]+]]:_(s32) = G_CONSTANT i32 168
; CHECK: [[PTR_ADD44:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C45]](s32)
; CHECK: [[COPY43:%[0-9]+]]:_(p5) = COPY [[PTR_ADD44]](p5)
; CHECK: G_STORE [[UV42]](s32), [[COPY43]](p5) :: (store 4 into %stack.0 + 168, align 256, addrspace 5)
; CHECK: G_STORE [[UV42]](s32), [[COPY43]](p5) :: (store 4 into %stack.0 + 168, align 8, basealign 256, addrspace 5)
; CHECK: [[C46:%[0-9]+]]:_(s32) = G_CONSTANT i32 172
; CHECK: [[PTR_ADD45:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C46]](s32)
; CHECK: [[COPY44:%[0-9]+]]:_(p5) = COPY [[PTR_ADD45]](p5)
; CHECK: G_STORE [[UV43]](s32), [[COPY44]](p5) :: (store 4 into %stack.0 + 172, align 256, addrspace 5)
; CHECK: G_STORE [[UV43]](s32), [[COPY44]](p5) :: (store 4 into %stack.0 + 172, basealign 256, addrspace 5)
; CHECK: [[C47:%[0-9]+]]:_(s32) = G_CONSTANT i32 176
; CHECK: [[PTR_ADD46:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C47]](s32)
; CHECK: [[COPY45:%[0-9]+]]:_(p5) = COPY [[PTR_ADD46]](p5)
; CHECK: G_STORE [[UV44]](s32), [[COPY45]](p5) :: (store 4 into %stack.0 + 176, align 256, addrspace 5)
; CHECK: G_STORE [[UV44]](s32), [[COPY45]](p5) :: (store 4 into %stack.0 + 176, align 16, basealign 256, addrspace 5)
; CHECK: [[C48:%[0-9]+]]:_(s32) = G_CONSTANT i32 180
; CHECK: [[PTR_ADD47:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C48]](s32)
; CHECK: [[COPY46:%[0-9]+]]:_(p5) = COPY [[PTR_ADD47]](p5)
; CHECK: G_STORE [[UV45]](s32), [[COPY46]](p5) :: (store 4 into %stack.0 + 180, align 256, addrspace 5)
; CHECK: G_STORE [[UV45]](s32), [[COPY46]](p5) :: (store 4 into %stack.0 + 180, basealign 256, addrspace 5)
; CHECK: [[C49:%[0-9]+]]:_(s32) = G_CONSTANT i32 184
; CHECK: [[PTR_ADD48:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C49]](s32)
; CHECK: [[COPY47:%[0-9]+]]:_(p5) = COPY [[PTR_ADD48]](p5)
; CHECK: G_STORE [[UV46]](s32), [[COPY47]](p5) :: (store 4 into %stack.0 + 184, align 256, addrspace 5)
; CHECK: G_STORE [[UV46]](s32), [[COPY47]](p5) :: (store 4 into %stack.0 + 184, align 8, basealign 256, addrspace 5)
; CHECK: [[C50:%[0-9]+]]:_(s32) = G_CONSTANT i32 188
; CHECK: [[PTR_ADD49:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C50]](s32)
; CHECK: [[COPY48:%[0-9]+]]:_(p5) = COPY [[PTR_ADD49]](p5)
; CHECK: G_STORE [[UV47]](s32), [[COPY48]](p5) :: (store 4 into %stack.0 + 188, align 256, addrspace 5)
; CHECK: G_STORE [[UV47]](s32), [[COPY48]](p5) :: (store 4 into %stack.0 + 188, basealign 256, addrspace 5)
; CHECK: [[C51:%[0-9]+]]:_(s32) = G_CONSTANT i32 192
; CHECK: [[PTR_ADD50:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C51]](s32)
; CHECK: [[COPY49:%[0-9]+]]:_(p5) = COPY [[PTR_ADD50]](p5)
; CHECK: G_STORE [[UV48]](s32), [[COPY49]](p5) :: (store 4 into %stack.0 + 192, align 256, addrspace 5)
; CHECK: G_STORE [[UV48]](s32), [[COPY49]](p5) :: (store 4 into %stack.0 + 192, align 64, basealign 256, addrspace 5)
; CHECK: [[C52:%[0-9]+]]:_(s32) = G_CONSTANT i32 196
; CHECK: [[PTR_ADD51:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C52]](s32)
; CHECK: [[COPY50:%[0-9]+]]:_(p5) = COPY [[PTR_ADD51]](p5)
; CHECK: G_STORE [[UV49]](s32), [[COPY50]](p5) :: (store 4 into %stack.0 + 196, align 256, addrspace 5)
; CHECK: G_STORE [[UV49]](s32), [[COPY50]](p5) :: (store 4 into %stack.0 + 196, basealign 256, addrspace 5)
; CHECK: [[C53:%[0-9]+]]:_(s32) = G_CONSTANT i32 200
; CHECK: [[PTR_ADD52:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C53]](s32)
; CHECK: [[COPY51:%[0-9]+]]:_(p5) = COPY [[PTR_ADD52]](p5)
; CHECK: G_STORE [[UV50]](s32), [[COPY51]](p5) :: (store 4 into %stack.0 + 200, align 256, addrspace 5)
; CHECK: G_STORE [[UV50]](s32), [[COPY51]](p5) :: (store 4 into %stack.0 + 200, align 8, basealign 256, addrspace 5)
; CHECK: [[C54:%[0-9]+]]:_(s32) = G_CONSTANT i32 204
; CHECK: [[PTR_ADD53:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C54]](s32)
; CHECK: [[COPY52:%[0-9]+]]:_(p5) = COPY [[PTR_ADD53]](p5)
; CHECK: G_STORE [[UV51]](s32), [[COPY52]](p5) :: (store 4 into %stack.0 + 204, align 256, addrspace 5)
; CHECK: G_STORE [[UV51]](s32), [[COPY52]](p5) :: (store 4 into %stack.0 + 204, basealign 256, addrspace 5)
; CHECK: [[C55:%[0-9]+]]:_(s32) = G_CONSTANT i32 208
; CHECK: [[PTR_ADD54:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C55]](s32)
; CHECK: [[COPY53:%[0-9]+]]:_(p5) = COPY [[PTR_ADD54]](p5)
; CHECK: G_STORE [[UV52]](s32), [[COPY53]](p5) :: (store 4 into %stack.0 + 208, align 256, addrspace 5)
; CHECK: G_STORE [[UV52]](s32), [[COPY53]](p5) :: (store 4 into %stack.0 + 208, align 16, basealign 256, addrspace 5)
; CHECK: [[C56:%[0-9]+]]:_(s32) = G_CONSTANT i32 212
; CHECK: [[PTR_ADD55:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C56]](s32)
; CHECK: [[COPY54:%[0-9]+]]:_(p5) = COPY [[PTR_ADD55]](p5)
; CHECK: G_STORE [[UV53]](s32), [[COPY54]](p5) :: (store 4 into %stack.0 + 212, align 256, addrspace 5)
; CHECK: G_STORE [[UV53]](s32), [[COPY54]](p5) :: (store 4 into %stack.0 + 212, basealign 256, addrspace 5)
; CHECK: [[C57:%[0-9]+]]:_(s32) = G_CONSTANT i32 216
; CHECK: [[PTR_ADD56:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C57]](s32)
; CHECK: [[COPY55:%[0-9]+]]:_(p5) = COPY [[PTR_ADD56]](p5)
; CHECK: G_STORE [[UV54]](s32), [[COPY55]](p5) :: (store 4 into %stack.0 + 216, align 256, addrspace 5)
; CHECK: G_STORE [[UV54]](s32), [[COPY55]](p5) :: (store 4 into %stack.0 + 216, align 8, basealign 256, addrspace 5)
; CHECK: [[C58:%[0-9]+]]:_(s32) = G_CONSTANT i32 220
; CHECK: [[PTR_ADD57:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C58]](s32)
; CHECK: [[COPY56:%[0-9]+]]:_(p5) = COPY [[PTR_ADD57]](p5)
; CHECK: G_STORE [[UV55]](s32), [[COPY56]](p5) :: (store 4 into %stack.0 + 220, align 256, addrspace 5)
; CHECK: G_STORE [[UV55]](s32), [[COPY56]](p5) :: (store 4 into %stack.0 + 220, basealign 256, addrspace 5)
; CHECK: [[C59:%[0-9]+]]:_(s32) = G_CONSTANT i32 224
; CHECK: [[PTR_ADD58:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C59]](s32)
; CHECK: [[COPY57:%[0-9]+]]:_(p5) = COPY [[PTR_ADD58]](p5)
; CHECK: G_STORE [[UV56]](s32), [[COPY57]](p5) :: (store 4 into %stack.0 + 224, align 256, addrspace 5)
; CHECK: G_STORE [[UV56]](s32), [[COPY57]](p5) :: (store 4 into %stack.0 + 224, align 32, basealign 256, addrspace 5)
; CHECK: [[C60:%[0-9]+]]:_(s32) = G_CONSTANT i32 228
; CHECK: [[PTR_ADD59:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C60]](s32)
; CHECK: [[COPY58:%[0-9]+]]:_(p5) = COPY [[PTR_ADD59]](p5)
; CHECK: G_STORE [[UV57]](s32), [[COPY58]](p5) :: (store 4 into %stack.0 + 228, align 256, addrspace 5)
; CHECK: G_STORE [[UV57]](s32), [[COPY58]](p5) :: (store 4 into %stack.0 + 228, basealign 256, addrspace 5)
; CHECK: [[C61:%[0-9]+]]:_(s32) = G_CONSTANT i32 232
; CHECK: [[PTR_ADD60:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C61]](s32)
; CHECK: [[COPY59:%[0-9]+]]:_(p5) = COPY [[PTR_ADD60]](p5)
; CHECK: G_STORE [[UV58]](s32), [[COPY59]](p5) :: (store 4 into %stack.0 + 232, align 256, addrspace 5)
; CHECK: G_STORE [[UV58]](s32), [[COPY59]](p5) :: (store 4 into %stack.0 + 232, align 8, basealign 256, addrspace 5)
; CHECK: [[C62:%[0-9]+]]:_(s32) = G_CONSTANT i32 236
; CHECK: [[PTR_ADD61:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C62]](s32)
; CHECK: [[COPY60:%[0-9]+]]:_(p5) = COPY [[PTR_ADD61]](p5)
; CHECK: G_STORE [[UV59]](s32), [[COPY60]](p5) :: (store 4 into %stack.0 + 236, align 256, addrspace 5)
; CHECK: G_STORE [[UV59]](s32), [[COPY60]](p5) :: (store 4 into %stack.0 + 236, basealign 256, addrspace 5)
; CHECK: [[C63:%[0-9]+]]:_(s32) = G_CONSTANT i32 240
; CHECK: [[PTR_ADD62:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C63]](s32)
; CHECK: [[COPY61:%[0-9]+]]:_(p5) = COPY [[PTR_ADD62]](p5)
; CHECK: G_STORE [[UV60]](s32), [[COPY61]](p5) :: (store 4 into %stack.0 + 240, align 256, addrspace 5)
; CHECK: G_STORE [[UV60]](s32), [[COPY61]](p5) :: (store 4 into %stack.0 + 240, align 16, basealign 256, addrspace 5)
; CHECK: [[C64:%[0-9]+]]:_(s32) = G_CONSTANT i32 244
; CHECK: [[PTR_ADD63:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C64]](s32)
; CHECK: [[COPY62:%[0-9]+]]:_(p5) = COPY [[PTR_ADD63]](p5)
; CHECK: G_STORE [[UV61]](s32), [[COPY62]](p5) :: (store 4 into %stack.0 + 244, align 256, addrspace 5)
; CHECK: G_STORE [[UV61]](s32), [[COPY62]](p5) :: (store 4 into %stack.0 + 244, basealign 256, addrspace 5)
; CHECK: [[C65:%[0-9]+]]:_(s32) = G_CONSTANT i32 248
; CHECK: [[PTR_ADD64:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C65]](s32)
; CHECK: [[COPY63:%[0-9]+]]:_(p5) = COPY [[PTR_ADD64]](p5)
; CHECK: G_STORE [[UV62]](s32), [[COPY63]](p5) :: (store 4 into %stack.0 + 248, align 256, addrspace 5)
; CHECK: G_STORE [[UV62]](s32), [[COPY63]](p5) :: (store 4 into %stack.0 + 248, align 8, basealign 256, addrspace 5)
; CHECK: [[C66:%[0-9]+]]:_(s32) = G_CONSTANT i32 252
; CHECK: [[PTR_ADD65:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX]], [[C66]](s32)
; CHECK: [[COPY64:%[0-9]+]]:_(p5) = COPY [[PTR_ADD65]](p5)
; CHECK: G_STORE [[UV63]](s32), [[COPY64]](p5) :: (store 4 into %stack.0 + 252, align 256, addrspace 5)
; CHECK: G_STORE [[UV63]](s32), [[COPY64]](p5) :: (store 4 into %stack.0 + 252, basealign 256, addrspace 5)
; CHECK: [[C67:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C67]]
; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[AND]], [[C4]]

@ -800,7 +800,7 @@ define amdgpu_ps void @s_buffer_load_v16i16_vgpr_offset(<4 x i32> inreg %rsrc, i
; CHECK: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store 16 into `<16 x i16> addrspace(1)* undef`, align 32, addrspace 1)
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; CHECK: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store 16 into `<16 x i16> addrspace(1)* undef` + 16, align 32, addrspace 1)
; CHECK: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store 16 into `<16 x i16> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
; CHECK: S_ENDPGM 0
; GREEDY-LABEL: name: s_buffer_load_v16i16_vgpr_offset
; GREEDY: bb.1 (%ir-block.0):
@ -821,7 +821,7 @@ define amdgpu_ps void @s_buffer_load_v16i16_vgpr_offset(<4 x i32> inreg %rsrc, i
; GREEDY: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store 16 into `<16 x i16> addrspace(1)* undef`, align 32, addrspace 1)
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; GREEDY: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store 16 into `<16 x i16> addrspace(1)* undef` + 16, align 32, addrspace 1)
; GREEDY: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store 16 into `<16 x i16> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
; GREEDY: S_ENDPGM 0
%val = call <16 x i16> @llvm.amdgcn.s.buffer.load.v16i16(<4 x i32> %rsrc, i32 %soffset, i32 0)
store <16 x i16> %val, <16 x i16> addrspace(1)* undef
@ -851,13 +851,13 @@ define amdgpu_ps void @s_buffer_load_v32i16_vgpr_offset(<4 x i32> inreg %rsrc, i
; CHECK: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef`, align 64, addrspace 1)
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; CHECK: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 16, align 64, addrspace 1)
; CHECK: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
; CHECK: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
; CHECK: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 32, align 64, addrspace 1)
; CHECK: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
; CHECK: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
; CHECK: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
; CHECK: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 48, align 64, addrspace 1)
; CHECK: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
; CHECK: S_ENDPGM 0
; GREEDY-LABEL: name: s_buffer_load_v32i16_vgpr_offset
; GREEDY: bb.1 (%ir-block.0):
@ -880,13 +880,13 @@ define amdgpu_ps void @s_buffer_load_v32i16_vgpr_offset(<4 x i32> inreg %rsrc, i
; GREEDY: G_STORE [[UV]](<8 x s16>), [[DEF]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef`, align 64, addrspace 1)
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; GREEDY: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 16, align 64, addrspace 1)
; GREEDY: G_STORE [[UV1]](<8 x s16>), [[PTR_ADD]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
; GREEDY: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
; GREEDY: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
; GREEDY: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 32, align 64, addrspace 1)
; GREEDY: G_STORE [[UV2]](<8 x s16>), [[PTR_ADD1]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
; GREEDY: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
; GREEDY: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
; GREEDY: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 48, align 64, addrspace 1)
; GREEDY: G_STORE [[UV3]](<8 x s16>), [[PTR_ADD2]](p1) :: (store 16 into `<32 x i16> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
; GREEDY: S_ENDPGM 0
%val = call <32 x i16> @llvm.amdgcn.s.buffer.load.v32i16(<4 x i32> %rsrc, i32 %soffset, i32 0)
store <32 x i16> %val, <32 x i16> addrspace(1)* undef
@ -914,7 +914,7 @@ define amdgpu_ps void @s_buffer_load_v4i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
; CHECK: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store 16 into `<4 x i64> addrspace(1)* undef`, align 32, addrspace 1)
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; CHECK: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store 16 into `<4 x i64> addrspace(1)* undef` + 16, align 32, addrspace 1)
; CHECK: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store 16 into `<4 x i64> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
; CHECK: S_ENDPGM 0
; GREEDY-LABEL: name: s_buffer_load_v4i64_vgpr_offset
; GREEDY: bb.1 (%ir-block.0):
@ -935,7 +935,7 @@ define amdgpu_ps void @s_buffer_load_v4i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
; GREEDY: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store 16 into `<4 x i64> addrspace(1)* undef`, align 32, addrspace 1)
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; GREEDY: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store 16 into `<4 x i64> addrspace(1)* undef` + 16, align 32, addrspace 1)
; GREEDY: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store 16 into `<4 x i64> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
; GREEDY: S_ENDPGM 0
%val = call <4 x i64> @llvm.amdgcn.s.buffer.load.v4i64(<4 x i32> %rsrc, i32 %soffset, i32 0)
store <4 x i64> %val, <4 x i64> addrspace(1)* undef
@ -965,13 +965,13 @@ define amdgpu_ps void @s_buffer_load_v8i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
; CHECK: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef`, align 64, addrspace 1)
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; CHECK: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 16, align 64, addrspace 1)
; CHECK: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
; CHECK: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
; CHECK: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 32, align 64, addrspace 1)
; CHECK: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
; CHECK: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
; CHECK: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
; CHECK: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 48, align 64, addrspace 1)
; CHECK: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
; CHECK: S_ENDPGM 0
; GREEDY-LABEL: name: s_buffer_load_v8i64_vgpr_offset
; GREEDY: bb.1 (%ir-block.0):
@ -994,13 +994,13 @@ define amdgpu_ps void @s_buffer_load_v8i64_vgpr_offset(<4 x i32> inreg %rsrc, i3
; GREEDY: G_STORE [[UV]](<2 x s64>), [[DEF]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef`, align 64, addrspace 1)
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; GREEDY: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 16, align 64, addrspace 1)
; GREEDY: G_STORE [[UV1]](<2 x s64>), [[PTR_ADD]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
; GREEDY: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
; GREEDY: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
; GREEDY: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 32, align 64, addrspace 1)
; GREEDY: G_STORE [[UV2]](<2 x s64>), [[PTR_ADD1]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
; GREEDY: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
; GREEDY: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
; GREEDY: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 48, align 64, addrspace 1)
; GREEDY: G_STORE [[UV3]](<2 x s64>), [[PTR_ADD2]](p1) :: (store 16 into `<8 x i64> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
; GREEDY: S_ENDPGM 0
%val = call <8 x i64> @llvm.amdgcn.s.buffer.load.v8i64(<4 x i32> %rsrc, i32 %soffset, i32 0)
store <8 x i64> %val, <8 x i64> addrspace(1)* undef
@ -1028,7 +1028,7 @@ define amdgpu_ps void @s_buffer_load_v4p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
; CHECK: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store 16 into `<4 x i8 addrspace(1)*> addrspace(1)* undef`, align 32, addrspace 1)
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; CHECK: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store 16 into `<4 x i8 addrspace(1)*> addrspace(1)* undef` + 16, align 32, addrspace 1)
; CHECK: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store 16 into `<4 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
; CHECK: S_ENDPGM 0
; GREEDY-LABEL: name: s_buffer_load_v4p1_vgpr_offset
; GREEDY: bb.1 (%ir-block.0):
@ -1049,7 +1049,7 @@ define amdgpu_ps void @s_buffer_load_v4p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GREEDY: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store 16 into `<4 x i8 addrspace(1)*> addrspace(1)* undef`, align 32, addrspace 1)
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; GREEDY: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store 16 into `<4 x i8 addrspace(1)*> addrspace(1)* undef` + 16, align 32, addrspace 1)
; GREEDY: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store 16 into `<4 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 32, addrspace 1)
; GREEDY: S_ENDPGM 0
%val = call <4 x i8 addrspace(1)*> @llvm.amdgcn.s.buffer.load.v4p1i8(<4 x i32> %rsrc, i32 %soffset, i32 0)
store <4 x i8 addrspace(1)*> %val, <4 x i8 addrspace(1)*> addrspace(1)* undef
@ -1079,13 +1079,13 @@ define amdgpu_ps void @s_buffer_load_v8p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
; CHECK: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef`, align 64, addrspace 1)
; CHECK: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; CHECK: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 16, align 64, addrspace 1)
; CHECK: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
; CHECK: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
; CHECK: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 32, align 64, addrspace 1)
; CHECK: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
; CHECK: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
; CHECK: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
; CHECK: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 48, align 64, addrspace 1)
; CHECK: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
; CHECK: S_ENDPGM 0
; GREEDY-LABEL: name: s_buffer_load_v8p1_vgpr_offset
; GREEDY: bb.1 (%ir-block.0):
@ -1108,13 +1108,13 @@ define amdgpu_ps void @s_buffer_load_v8p1_vgpr_offset(<4 x i32> inreg %rsrc, i32
; GREEDY: G_STORE [[UV]](<2 x p1>), [[DEF]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef`, align 64, addrspace 1)
; GREEDY: [[C2:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 16
; GREEDY: [[PTR_ADD:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C2]](s64)
; GREEDY: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 16, align 64, addrspace 1)
; GREEDY: G_STORE [[UV1]](<2 x p1>), [[PTR_ADD]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 16, basealign 64, addrspace 1)
; GREEDY: [[C3:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 32
; GREEDY: [[PTR_ADD1:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C3]](s64)
; GREEDY: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 32, align 64, addrspace 1)
; GREEDY: G_STORE [[UV2]](<2 x p1>), [[PTR_ADD1]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 32, align 32, basealign 64, addrspace 1)
; GREEDY: [[C4:%[0-9]+]]:sgpr(s64) = G_CONSTANT i64 48
; GREEDY: [[PTR_ADD2:%[0-9]+]]:sgpr(p1) = G_PTR_ADD [[DEF]], [[C4]](s64)
; GREEDY: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 48, align 64, addrspace 1)
; GREEDY: G_STORE [[UV3]](<2 x p1>), [[PTR_ADD2]](p1) :: (store 16 into `<8 x i8 addrspace(1)*> addrspace(1)* undef` + 48, basealign 64, addrspace 1)
; GREEDY: S_ENDPGM 0
%val = call <8 x i8 addrspace(1)*> @llvm.amdgcn.s.buffer.load.v8p1i8(<4 x i32> %rsrc, i32 %soffset, i32 0)
store <8 x i8 addrspace(1)*> %val, <8 x i8 addrspace(1)*> addrspace(1)* undef

@ -119,7 +119,7 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16 from %ir.global.not.uniform.v8i32, align 32, addrspace 1)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 16 from %ir.global.not.uniform.v8i32 + 16, align 32, addrspace 1)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 16 from %ir.global.not.uniform.v8i32 + 16, basealign 32, addrspace 1)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(<8 x s32>) = G_LOAD %0 :: (load 32 from %ir.global.not.uniform.v8i32)
@ -139,7 +139,7 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p1) :: (load 16 from %ir.global.not.uniform.v4i64, align 32, addrspace 1)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p1) :: (load 16 from %ir.global.not.uniform.v4i64 + 16, align 32, addrspace 1)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p1) :: (load 16 from %ir.global.not.uniform.v4i64 + 16, basealign 32, addrspace 1)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(<4 x s64>) = G_LOAD %0 :: (load 32 from %ir.global.not.uniform.v4i64)
@ -158,13 +158,13 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p1) :: (load 16 from %ir.global.not.uniform.v16i32, align 64, addrspace 1)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 16, align 64, addrspace 1)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 16, basealign 64, addrspace 1)
; CHECK: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 32, align 64, addrspace 1)
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 32, align 32, basealign 64, addrspace 1)
; CHECK: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; CHECK: [[PTR_ADD2:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 48, align 64, addrspace 1)
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p1) :: (load 16 from %ir.global.not.uniform.v16i32 + 48, basealign 64, addrspace 1)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(<16 x s32>) = G_LOAD %0 :: (load 64 from %ir.global.not.uniform.v16i32)
@ -181,13 +181,13 @@ body: |
; CHECK: [[LOAD0:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR]](p1) :: (load 16 from %ir.global.not.uniform.v8i64, align 64, addrspace 1)
; CHECK: [[OFFSET16:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[GEP16:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET16]](s64)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP16]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 16, align 64, addrspace 1)
; CHECK: [[LOAD16:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP16]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 16, basealign 64, addrspace 1)
; CHECK: [[OFFSET32:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; CHECK: [[GEP32:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET32]](s64)
; CHECK: [[LOAD32:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP32]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 32, align 64, addrspace 1)
; CHECK: [[LOAD32:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP32]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 32, align 32, basealign 64, addrspace 1)
; CHECK: [[OFFSET48:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; CHECK: [[GEP48:%[0-9]+]]:vgpr(p1) = G_PTR_ADD [[PTR]], [[OFFSET48]](s64)
; CHECK: [[LOAD48:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP48]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 48, align 64, addrspace 1)
; CHECK: [[LOAD48:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[GEP48]](p1) :: (load 16 from %ir.global.not.uniform.v8i64 + 48, basealign 64, addrspace 1)
; CHECK: %1:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[LOAD0]](<2 x s64>), [[LOAD16]](<2 x s64>), [[LOAD32]](<2 x s64>), [[LOAD48]](<2 x s64>)
%0:_(p1) = COPY $sgpr0_sgpr1
%1:_(<8 x s64>) = G_LOAD %0 :: (load 64 from %ir.global.not.uniform.v8i64)
@ -262,7 +262,7 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load 16 from %ir.constant.not.uniform.v8i32, align 32, addrspace 4)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform.v8i32 + 16, align 32, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform.v8i32 + 16, basealign 32, addrspace 4)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>)
%0:_(p4) = COPY $sgpr0_sgpr1
%1:_(<8 x s32>) = G_LOAD %0 :: (load 32 from %ir.constant.not.uniform.v8i32)
@ -281,7 +281,7 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(s128) = G_LOAD [[COPY]](p4) :: (load 16 from %ir.constant.not.uniform, align 32, addrspace 4)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(s128) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform + 16, align 32, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(s128) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform + 16, basealign 32, addrspace 4)
; CHECK: [[MV:%[0-9]+]]:vgpr(s256) = G_MERGE_VALUES [[LOAD]](s128), [[LOAD1]](s128)
%0:_(p4) = COPY $sgpr0_sgpr1
%1:_(s256) = G_LOAD %0 :: (load 32 from %ir.constant.not.uniform)
@ -301,7 +301,7 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[COPY]](p4) :: (load 16 from %ir.constant.not.uniform, align 32, addrspace 4)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform + 16, align 32, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<8 x s16>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform + 16, basealign 32, addrspace 4)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s16>) = G_CONCAT_VECTORS [[LOAD]](<8 x s16>), [[LOAD1]](<8 x s16>)
%0:_(p4) = COPY $sgpr0_sgpr1
%1:_(<16 x s16>) = G_LOAD %0 :: (load 32 from %ir.constant.not.uniform)
@ -320,7 +320,7 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load 16 from %ir.constant.not.uniform.v4i64, align 32, addrspace 4)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform.v4i64 + 16, align 32, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform.v4i64 + 16, basealign 32, addrspace 4)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<4 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>)
%0:_(p4) = COPY $sgpr0_sgpr1
%1:_(<4 x s64>) = G_LOAD %0 :: (load 32 from %ir.constant.not.uniform.v4i64)
@ -339,13 +339,13 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[COPY]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32, align 64, addrspace 4)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 16, align 64, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 16, basealign 64, addrspace 4)
; CHECK: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 32, align 64, addrspace 4)
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD1]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 32, align 32, basealign 64, addrspace 4)
; CHECK: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; CHECK: [[PTR_ADD2:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 48, align 64, addrspace 4)
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<4 x s32>) = G_LOAD [[PTR_ADD2]](p4) :: (load 16 from %ir.constant.not.uniform.v16i32 + 48, basealign 64, addrspace 4)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<16 x s32>) = G_CONCAT_VECTORS [[LOAD]](<4 x s32>), [[LOAD1]](<4 x s32>), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
%0:_(p4) = COPY $sgpr0_sgpr1
%1:_(<16 x s32>) = G_LOAD %0 :: (load 64 from %ir.constant.not.uniform.v16i32)
@ -364,13 +364,13 @@ body: |
; CHECK: [[LOAD:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[COPY]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64, align 64, addrspace 4)
; CHECK: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 16, align 64, addrspace 4)
; CHECK: [[LOAD1:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 16, basealign 64, addrspace 4)
; CHECK: [[C1:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD1]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 32, align 64, addrspace 4)
; CHECK: [[LOAD2:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD1]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 32, align 32, basealign 64, addrspace 4)
; CHECK: [[C2:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 48
; CHECK: [[PTR_ADD2:%[0-9]+]]:vgpr(p4) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD2]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 48, align 64, addrspace 4)
; CHECK: [[LOAD3:%[0-9]+]]:vgpr(<2 x s64>) = G_LOAD [[PTR_ADD2]](p4) :: (load 16 from %ir.constant.not.uniform.v8i64 + 48, basealign 64, addrspace 4)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:vgpr(<8 x s64>) = G_CONCAT_VECTORS [[LOAD]](<2 x s64>), [[LOAD1]](<2 x s64>), [[LOAD2]](<2 x s64>), [[LOAD3]](<2 x s64>)
%0:_(p4) = COPY $sgpr0_sgpr1
%1:_(<8 x s64>) = G_LOAD %0 :: (load 64 from %ir.constant.not.uniform.v8i64)

@ -21,10 +21,10 @@ body: |
liveins: $rdi
; CHECK: [[@LINE+1]]:65: expected 'align'
$xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, 32)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, basealign 32)
$xmm2 = FsFLD0SS
$xmm1 = MOVSSrr killed $xmm1, killed $xmm2
MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, basealign 32)
RETQ
...

@ -21,10 +21,10 @@ body: |
liveins: $rdi
; CHECK: [[@LINE+1]]:70: expected an integer literal after 'align'
$xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, basealign 32)
$xmm2 = FsFLD0SS
$xmm1 = MOVSSrr killed $xmm1, killed $xmm2
MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, basealign 32)
RETQ
...

@ -21,10 +21,10 @@ body: |
liveins: $rdi
; CHECK: [[@LINE+1]]:71: expected an integer literal after 'align'
$xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align -32)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, basealign 32)
$xmm2 = FsFLD0SS
$xmm1 = MOVSSrr killed $xmm1, killed $xmm2
MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, basealign 32)
RETQ
...

@ -60,11 +60,11 @@
ret void
}
define void @memory_alignment(<8 x float>* %vec) {
define void @memory_alignment(<16 x float>* %vec) {
entry:
%v = load <8 x float>, <8 x float>* %vec
%v2 = insertelement <8 x float> %v, float 0.0, i32 4
store <8 x float> %v2, <8 x float>* %vec
%v = load <16 x float>, <16 x float>* %vec
%v2 = insertelement <16 x float> %v, float 0.0, i32 4
store <16 x float> %v2, <16 x float>* %vec
ret void
}
@ -312,16 +312,24 @@ body: |
bb.0.entry:
liveins: $rdi
; CHECK: name: memory_alignment
; CHECK: $xmm0 = MOVAPSrm $rdi, 1, $noreg, 0, $noreg :: (load 16 from %ir.vec, align 32)
; CHECK-NEXT: $xmm1 = MOVAPSrm $rdi, 1, $noreg, 16, $noreg :: (load 16 from %ir.vec + 16, align 32)
; CHECK: MOVAPSmr $rdi, 1, $noreg, 0, $noreg, killed $xmm0 :: (store 16 into %ir.vec, align 32)
; CHECK-NEXT: MOVAPSmr killed $rdi, 1, $noreg, 16, $noreg, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
$xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align 32)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, align 32)
$xmm2 = FsFLD0SS
$xmm1 = MOVSSrr killed $xmm1, killed $xmm2
MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 32)
MOVAPSmr killed $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, align 32)
; CHECK: $xmm0 = MOVAPSrm $rdi, 1, $noreg, 0, $noreg :: (load 16 from %ir.vec, align 64)
; CHECK-NEXT: $xmm1 = MOVAPSrm $rdi, 1, $noreg, 16, $noreg :: (load 16 from %ir.vec + 16, basealign 64)
; CHECK-NEXT: $xmm2 = MOVAPSrm $rdi, 1, $noreg, 32, $noreg :: (load 16 from %ir.vec + 32, align 32, basealign 64)
; CHECK-NEXT: $xmm3 = MOVAPSrm $rdi, 1, $noreg, 48, $noreg :: (load 16 from %ir.vec + 48, basealign 64)
; CHECK: MOVAPSmr $rdi, 1, $noreg, 0, $noreg, killed $xmm0 :: (store 16 into %ir.vec, align 64)
; CHECK-NEXT: MOVAPSmr $rdi, 1, $noreg, 16, $noreg, killed $xmm1 :: (store 16 into %ir.vec + 16, basealign 64)
; CHECK-NEXT: MOVAPSmr $rdi, 1, $noreg, 32, $noreg, killed $xmm2 :: (store 16 into %ir.vec + 32, align 32, basealign 64)
; CHECK-NEXT: MOVAPSmr killed $rdi, 1, $noreg, 48, $noreg, killed $xmm3 :: (store 16 into %ir.vec + 48, basealign 64)
$xmm0 = MOVAPSrm $rdi, 1, _, 0, _ :: (load 16 from %ir.vec, align 64)
$xmm1 = MOVAPSrm $rdi, 1, _, 16, _ :: (load 16 from %ir.vec + 16, basealign 64)
$xmm2 = MOVAPSrm $rdi, 1, _, 32, _ :: (load 16 from %ir.vec + 32, align 32, basealign 64)
$xmm3 = MOVAPSrm $rdi, 1, _, 48, _ :: (load 16 from %ir.vec + 48, basealign 64)
$xmm4 = FsFLD0SS
$xmm1 = MOVSSrr killed $xmm1, killed $xmm4
MOVAPSmr $rdi, 1, _, 0, _, killed $xmm0 :: (store 16 into %ir.vec, align 64)
MOVAPSmr $rdi, 1, _, 16, _, killed $xmm1 :: (store 16 into %ir.vec + 16, basealign 64)
MOVAPSmr $rdi, 1, _, 32, _, killed $xmm2 :: (store 16 into %ir.vec + 32, align 32, basealign 64)
MOVAPSmr killed $rdi, 1, _, 48, _, killed $xmm3 :: (store 16 into %ir.vec + 48, basealign 64)
RETQ
...
---

@ -307,7 +307,7 @@ body: |
; MIPS32: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 4)
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 4)
; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 2, basealign 4)
; MIPS32: RetRA
; MIPS32R6-LABEL: name: store3align4
; MIPS32R6: liveins: $a0, $a1
@ -319,7 +319,7 @@ body: |
; MIPS32R6: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 4)
; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 4)
; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 2, basealign 4)
; MIPS32R6: RetRA
%0:_(p0) = COPY $a0
%1:_(s32) = COPY $a1
@ -346,7 +346,7 @@ body: |
; MIPS32: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 8)
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 8)
; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 2, basealign 8)
; MIPS32: RetRA
; MIPS32R6-LABEL: name: store3align8
; MIPS32R6: liveins: $a0, $a1
@ -358,7 +358,7 @@ body: |
; MIPS32R6: G_STORE [[COPY2]](s32), [[COPY]](p0) :: (store 2 into %ir.0, align 8)
; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C1]](s32)
; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 8)
; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 2, align 2, basealign 8)
; MIPS32R6: RetRA
%0:_(p0) = COPY $a0
%1:_(s32) = COPY $a1
@ -494,7 +494,7 @@ body: |
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 8)
; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 4, basealign 8)
; MIPS32: RetRA
; MIPS32R6-LABEL: name: store5align8
; MIPS32R6: liveins: $a0, $a2, $a3
@ -504,7 +504,7 @@ body: |
; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 8)
; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 1 into %ir.0 + 4, align 4, basealign 8)
; MIPS32R6: RetRA
%0:_(p0) = COPY $a0
%2:_(s32) = COPY $a2
@ -647,7 +647,7 @@ body: |
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; MIPS32: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 8)
; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4, basealign 8)
; MIPS32: RetRA
; MIPS32R6-LABEL: name: store6align8
; MIPS32R6: liveins: $a0, $a2, $a3
@ -657,7 +657,7 @@ body: |
; MIPS32R6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; MIPS32R6: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 8)
; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4, basealign 8)
; MIPS32R6: RetRA
%0:_(p0) = COPY $a0
%2:_(s32) = COPY $a2
@ -788,7 +788,7 @@ body: |
; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4)
; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 4)
; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 2, basealign 4)
; MIPS32: RetRA
; MIPS32R6-LABEL: name: store7align4
; MIPS32R6: liveins: $a0, $a2, $a3
@ -803,7 +803,7 @@ body: |
; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4)
; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 4)
; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 2, basealign 4)
; MIPS32R6: RetRA
%0:_(p0) = COPY $a0
%2:_(s32) = COPY $a2
@ -832,10 +832,10 @@ body: |
; MIPS32: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; MIPS32: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 8)
; MIPS32: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4, basealign 8)
; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 8)
; MIPS32: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 2, basealign 8)
; MIPS32: RetRA
; MIPS32R6-LABEL: name: store7align8
; MIPS32R6: liveins: $a0, $a2, $a3
@ -847,10 +847,10 @@ body: |
; MIPS32R6: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4 into %ir.0, align 8)
; MIPS32R6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; MIPS32R6: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s32)
; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 8)
; MIPS32R6: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store 2 into %ir.0 + 4, align 4, basealign 8)
; MIPS32R6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32R6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s32)
; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 8)
; MIPS32R6: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.0 + 6, align 2, basealign 8)
; MIPS32R6: RetRA
%0:_(p0) = COPY $a0
%2:_(s32) = COPY $a2

@ -46,7 +46,7 @@ body: |
; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.ptr, align 8)
; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP]](p0) :: (load 4 from %ir.ptr + 4, align 8)
; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP]](p0) :: (load 4 from %ir.ptr + 4, basealign 8)
; MIPS32: $v0 = COPY [[LOAD]](s32)
; MIPS32: $v1 = COPY [[LOAD1]](s32)
; MIPS32: RetRA implicit $v0, implicit $v1

@ -477,21 +477,21 @@ body: |
; MIPS32: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from %ir.a, align 8)
; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C4]](s32)
; MIPS32: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP]](p0) :: (load 4 from %ir.a + 4, align 8)
; MIPS32: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP]](p0) :: (load 4 from %ir.a + 4, basealign 8)
; MIPS32: G_BR %bb.6
; MIPS32: bb.4.b.PHI.1.1:
; MIPS32: successors: %bb.6(0x80000000)
; MIPS32: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.b, align 8)
; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP1:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C5]](s32)
; MIPS32: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.b + 4, align 8)
; MIPS32: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.b + 4, basealign 8)
; MIPS32: G_BR %bb.6
; MIPS32: bb.5.b.PHI.1.2:
; MIPS32: successors: %bb.6(0x80000000)
; MIPS32: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.c, align 8)
; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP2:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD1]], [[C6]](s32)
; MIPS32: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.c + 4, align 8)
; MIPS32: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.c + 4, basealign 8)
; MIPS32: bb.6.b.PHI.1:
; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
; MIPS32: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
@ -505,7 +505,7 @@ body: |
; MIPS32: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
; MIPS32: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP3:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C8]](s32)
; MIPS32: G_STORE [[PHI1]](s32), [[GEP3]](p0) :: (store 4 into %ir.result + 4, align 8)
; MIPS32: G_STORE [[PHI1]](s32), [[GEP3]](p0) :: (store 4 into %ir.result + 4, basealign 8)
; MIPS32: RetRA
; MIPS32: bb.8.pre.PHI.2:
; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
@ -519,14 +519,14 @@ body: |
; MIPS32: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from %ir.a, align 8)
; MIPS32: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP4:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C10]](s32)
; MIPS32: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.a + 4, align 8)
; MIPS32: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.a + 4, basealign 8)
; MIPS32: G_BR %bb.11
; MIPS32: bb.10.b.PHI.2.1:
; MIPS32: successors: %bb.11(0x80000000)
; MIPS32: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.b, align 8)
; MIPS32: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP5:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C11]](s32)
; MIPS32: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP5]](p0) :: (load 4 from %ir.b + 4, align 8)
; MIPS32: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP5]](p0) :: (load 4 from %ir.b + 4, basealign 8)
; MIPS32: bb.11.b.PHI.2:
; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
; MIPS32: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
@ -539,7 +539,7 @@ body: |
; MIPS32: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
; MIPS32: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP6:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C13]](s32)
; MIPS32: G_STORE [[PHI3]](s32), [[GEP6]](p0) :: (store 4 into %ir.result + 4, align 8)
; MIPS32: G_STORE [[PHI3]](s32), [[GEP6]](p0) :: (store 4 into %ir.result + 4, basealign 8)
; MIPS32: RetRA
; MIPS32: bb.13.b.PHI.3:
; MIPS32: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
@ -558,11 +558,11 @@ body: |
; MIPS32: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
; MIPS32: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP7:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C15]](s32)
; MIPS32: G_STORE [[SELECT3]](s32), [[GEP7]](p0) :: (store 4 into %ir.result + 4, align 8)
; MIPS32: G_STORE [[SELECT3]](s32), [[GEP7]](p0) :: (store 4 into %ir.result + 4, basealign 8)
; MIPS32: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
; MIPS32: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP8:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C16]](s32)
; MIPS32: G_STORE [[PHI5]](s32), [[GEP8]](p0) :: (store 4 into %ir.result + 4, align 8)
; MIPS32: G_STORE [[PHI5]](s32), [[GEP8]](p0) :: (store 4 into %ir.result + 4, basealign 8)
; MIPS32: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3

View File

@ -477,21 +477,21 @@ body: |
; MIPS32: [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from %ir.a, align 8)
; MIPS32: [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C4]](s32)
; MIPS32: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP]](p0) :: (load 4 from %ir.a + 4, align 8)
; MIPS32: [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP]](p0) :: (load 4 from %ir.a + 4, basealign 8)
; MIPS32: G_BR %bb.6
; MIPS32: bb.4.b.PHI.1.1:
; MIPS32: successors: %bb.6(0x80000000)
; MIPS32: [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.b, align 8)
; MIPS32: [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP1:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C5]](s32)
; MIPS32: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.b + 4, align 8)
; MIPS32: [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.b + 4, basealign 8)
; MIPS32: G_BR %bb.6
; MIPS32: bb.5.b.PHI.1.2:
; MIPS32: successors: %bb.6(0x80000000)
; MIPS32: [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.c, align 8)
; MIPS32: [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP2:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD1]], [[C6]](s32)
; MIPS32: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.c + 4, align 8)
; MIPS32: [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.c + 4, basealign 8)
; MIPS32: bb.6.b.PHI.1:
; MIPS32: successors: %bb.7(0x40000000), %bb.13(0x40000000)
; MIPS32: [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
@ -505,7 +505,7 @@ body: |
; MIPS32: G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
; MIPS32: [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP3:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C8]](s32)
; MIPS32: G_STORE [[PHI1]](s32), [[GEP3]](p0) :: (store 4 into %ir.result + 4, align 8)
; MIPS32: G_STORE [[PHI1]](s32), [[GEP3]](p0) :: (store 4 into %ir.result + 4, basealign 8)
; MIPS32: RetRA
; MIPS32: bb.8.pre.PHI.2:
; MIPS32: successors: %bb.9(0x40000000), %bb.10(0x40000000)
@ -519,14 +519,14 @@ body: |
; MIPS32: [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from %ir.a, align 8)
; MIPS32: [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP4:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY3]], [[C10]](s32)
; MIPS32: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.a + 4, align 8)
; MIPS32: [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.a + 4, basealign 8)
; MIPS32: G_BR %bb.11
; MIPS32: bb.10.b.PHI.2.1:
; MIPS32: successors: %bb.11(0x80000000)
; MIPS32: [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.b, align 8)
; MIPS32: [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP5:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD]], [[C11]](s32)
; MIPS32: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP5]](p0) :: (load 4 from %ir.b + 4, align 8)
; MIPS32: [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP5]](p0) :: (load 4 from %ir.b + 4, basealign 8)
; MIPS32: bb.11.b.PHI.2:
; MIPS32: successors: %bb.13(0x40000000), %bb.12(0x40000000)
; MIPS32: [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
@ -539,7 +539,7 @@ body: |
; MIPS32: G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
; MIPS32: [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP6:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C13]](s32)
; MIPS32: G_STORE [[PHI3]](s32), [[GEP6]](p0) :: (store 4 into %ir.result + 4, align 8)
; MIPS32: G_STORE [[PHI3]](s32), [[GEP6]](p0) :: (store 4 into %ir.result + 4, basealign 8)
; MIPS32: RetRA
; MIPS32: bb.13.b.PHI.3:
; MIPS32: [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
@ -558,11 +558,11 @@ body: |
; MIPS32: G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
; MIPS32: [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP7:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C15]](s32)
; MIPS32: G_STORE [[SELECT3]](s32), [[GEP7]](p0) :: (store 4 into %ir.result + 4, align 8)
; MIPS32: G_STORE [[SELECT3]](s32), [[GEP7]](p0) :: (store 4 into %ir.result + 4, basealign 8)
; MIPS32: G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
; MIPS32: [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP8:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[LOAD2]], [[C16]](s32)
; MIPS32: G_STORE [[PHI5]](s32), [[GEP8]](p0) :: (store 4 into %ir.result + 4, align 8)
; MIPS32: G_STORE [[PHI5]](s32), [[GEP8]](p0) :: (store 4 into %ir.result + 4, basealign 8)
; MIPS32: RetRA
bb.1.entry:
liveins: $a0, $a1, $a2, $a3

View File

@ -46,7 +46,7 @@ body: |
; MIPS32: G_STORE [[COPY]](s32), [[COPY2]](p0) :: (store 4 into %ir.ptr, align 8)
; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
; MIPS32: [[GEP:%[0-9]+]]:gprb(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
; MIPS32: G_STORE [[COPY1]](s32), [[GEP]](p0) :: (store 4 into %ir.ptr + 4, align 8)
; MIPS32: G_STORE [[COPY1]](s32), [[GEP]](p0) :: (store 4 into %ir.ptr + 4, basealign 8)
; MIPS32: RetRA
%2:_(s32) = COPY $a0
%3:_(s32) = COPY $a1

View File

@ -992,7 +992,7 @@ declare void @test_stackarg_int(i32, i32, i32, i32, i32, i32, i32, i32, i8 zeroe
; 32BIT-DAG: renamable $r[[REGLLIADDR:[0-9]+]] = LWZtoc @lli, $r2 :: (load 4 from got)
; 32BIT-DAG: renamable $r[[REGLLI1:[0-9]+]] = LWZ 0, renamable $r[[REGLLIADDR]] :: (dereferenceable load 4 from @lli, align 8)
; 32BIT-DAG: STW killed renamable $r[[REGLLI1]], 68, $r1 :: (store 4)
; 32BIT-DAG: renamable $r[[REGLLI2:[0-9]+]] = LWZ 4, killed renamable $r[[REGLLIADDR]] :: (dereferenceable load 4 from @lli + 4, align 8)
; 32BIT-DAG: renamable $r[[REGLLI2:[0-9]+]] = LWZ 4, killed renamable $r[[REGLLIADDR]] :: (dereferenceable load 4 from @lli + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REGLLI2]], 72, $r1 :: (store 4)
; 32BIT-DAG: STW renamable $r[[REGI]], 76, $r1 :: (store 4)
; 32BIT-NEXT: BL_NOP <mcsymbol .test_stackarg_int[PR]>, csr_aix32, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $r5, implicit $r6, implicit $r7, implicit $r8, implicit $r9, implicit $r10, implicit $r2, implicit-def $r1
@ -1484,7 +1484,7 @@ entry:
; 32BIT-DAG: $r10 = LI 8
; 32BIT-DAG: renamable $r[[REGLL1ADDR:[0-9]+]] = LWZtoc @ll1, $r2 :: (load 4 from got)
; 32BIT-DAG: renamable $r[[REGLL1A:[0-9]+]] = LWZ 0, renamable $r[[REGLL1ADDR]] :: (dereferenceable load 4 from @ll1, align 8)
; 32BIT-DAG: renamable $r[[REGLL1B:[0-9]+]] = LWZ 4, killed renamable $r[[REGLL1ADDR]] :: (dereferenceable load 4 from @ll1 + 4, align 8)
; 32BIT-DAG: renamable $r[[REGLL1B:[0-9]+]] = LWZ 4, killed renamable $r[[REGLL1ADDR]] :: (dereferenceable load 4 from @ll1 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REGLL1A]], 56, $r1 :: (store 4)
; 32BIT-DAG: STW killed renamable $r[[REGLL1B]], 60, $r1 :: (store 4)
; 32BIT-DAG: renamable $r[[REGSIADDR:[0-9]+]] = LWZtoc @si1, $r2 :: (load 4 from got)
@ -1501,7 +1501,7 @@ entry:
; 32BIT-DAG: STW killed renamable $r[[REGSI]], 76, $r1 :: (store 4)
; 32BIT-DAG: renamable $r[[REGLL2ADDR:[0-9]+]] = LWZtoc @ll2, $r2 :: (load 4 from got)
; 32BIT-DAG: renamable $r[[REGLL2A:[0-9]+]] = LWZ 0, renamable $r[[REGLL2ADDR]] :: (dereferenceable load 4 from @ll2, align 8)
; 32BIT-DAG: renamable $r[[REGLL2B:[0-9]+]] = LWZ 4, killed renamable $r[[REGLL2ADDR]] :: (dereferenceable load 4 from @ll2 + 4, align 8)
; 32BIT-DAG: renamable $r[[REGLL2B:[0-9]+]] = LWZ 4, killed renamable $r[[REGLL2ADDR]] :: (dereferenceable load 4 from @ll2 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REGLL2A]], 80, $r1 :: (store 4)
; 32BIT-DAG: STW killed renamable $r[[REGLL2B]], 84, $r1 :: (store 4)
; 32BIT-DAG: renamable $r[[REGUCADDR:[0-9]+]] = LWZtoc @uc1, $r2 :: (load 4 from got)
@ -1842,23 +1842,23 @@ entry:
; 32BIT-DAG: renamable $r[[SCRATCHREG:[0-9]+]] = LWZtoc %const.10, $r2 :: (load 4 from got)
; 32BIT-DAG: renamable $r[[SCRATCHREG:[0-9]+]] = LWZtoc %const.11, $r2 :: (load 4 from got)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 56, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 60, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 60, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 64, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 68, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 68, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 72, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 76, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 76, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 80, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[SCRATCHREG:[0-9]+]], 84, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[SCRATCHREG:[0-9]+]], 84, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 88, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 92, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 92, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 96, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 100, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 100, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 104, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 108, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 108, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 112, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 116, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 116, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 120, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 124, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 124, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[SCRATCHREG:[0-9]+]], 128, $r1 :: (store 4)
; 32BIT-DAG: renamable $r[[REGF1:[0-9]+]] = LWZtoc @f14, $r2 :: (load 4 from got)
; 32BIT-DAG: renamable $r3 = LWZ 0, killed renamable $r[[REGF1]] :: (load 4 from @f14)
@ -2243,33 +2243,33 @@ define void @caller_mix() {
; 32BIT-DAG: $r9 = LI 7
; 32BIT-DAG: $r10 = LI 8
; 32BIT-DAG: STW killed renamable $r[[REG1:[0-9]+]], 56, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG2:[0-9]+]], 60, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG2:[0-9]+]], 60, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG3:[0-9]+]], 64, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG4:[0-9]+]], 68, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG4:[0-9]+]], 68, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG5:[0-9]+]], 72, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG6:[0-9]+]], 76, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG6:[0-9]+]], 76, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG7:[0-9]+]], 80, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG8:[0-9]+]], 84, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG8:[0-9]+]], 84, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG9:[0-9]+]], 88, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG10:[0-9]+]], 92, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG10:[0-9]+]], 92, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG11:[0-9]+]], 96, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG12:[0-9]+]], 100, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG12:[0-9]+]], 100, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG13:[0-9]+]], 104, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG14:[0-9]+]], 108, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG14:[0-9]+]], 108, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG15:[0-9]+]], 112, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG16:[0-9]+]], 116, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG16:[0-9]+]], 116, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG17:[0-9]+]], 120, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG18:[0-9]+]], 128, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG19:[0-9]+]], 124, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG20:[0-9]+]], 132, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW renamable $r[[REG19:[0-9]+]], 124, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG20:[0-9]+]], 132, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG21:[0-9]+]], 136, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG22:[0-9]+]], 140, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG22:[0-9]+]], 140, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG23:[0-9]+]], 144, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG24:[0-9]+]], 148, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG24:[0-9]+]], 148, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG25:[0-9]+]], 152, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG26:[0-9]+]], 156, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG26:[0-9]+]], 156, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-DAG: STW killed renamable $r[[REG27:[0-9]+]], 160, $r1 :: (store 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG28:[0-9]+]], 164, $r1 :: (store 4 + 4, align 8)
; 32BIT-DAG: STW killed renamable $r[[REG28:[0-9]+]], 164, $r1 :: (store 4 + 4, basealign 8)
; 32BIT-NEXT: BL_NOP <mcsymbol .mix_floats>, csr_aix32, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $r5, implicit $r6, implicit $r7, implicit $r8, implicit $r9, implicit $r10, implicit $f1, implicit $f2, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $r2, implicit-def $r1, implicit-def dead $r3
; 32BIT-NEXT: ADJCALLSTACKUP 168, 0, implicit-def dead $r1, implicit $r1
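The argument-area stores above all follow one two-word pattern, so their annotations can be reproduced mechanically. A rough standalone sketch, assuming nothing beyond the rule the check lines themselves exhibit (align is printed when it differs from the access size, basealign when it differs from align); the helper repeats the power-of-two math from the earlier sketch:

#include <cstdint>
#include <cstdio>

static uint64_t effectiveAlign(uint64_t BaseAlign, uint64_t Offset) {
  if (Offset == 0)
    return BaseAlign;
  uint64_t Combined = BaseAlign | Offset;
  return Combined & (~Combined + 1); // lowest set bit of BaseAlign | Offset
}

int main() {
  const uint64_t Size = 4, BaseAlign = 8;
  // Each 8-byte argument slot is written as two 4-byte words, at offsets
  // 0 and 4 from its 8-byte-aligned base.
  for (uint64_t Off = 0; Off <= 4; Off += 4) {
    uint64_t Align = effectiveAlign(BaseAlign, Off);
    std::printf("offset %llu ->", (unsigned long long)Off);
    if (Align != Size)
      std::printf(" align %llu", (unsigned long long)Align);
    if (Align != BaseAlign)
      std::printf(" basealign %llu", (unsigned long long)BaseAlign);
    std::printf("\n");
  }
  // Output: "offset 0 -> align 8" and "offset 4 -> basealign 8", matching
  // "(store 4, align 8)" and "(store 4 + 4, basealign 8)" above.
  return 0;
}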

View File

@ -94,11 +94,11 @@ body: |
; CHECK: %7:gr32 = MOV32rm %stack.0.a, 1, $noreg, 36, $noreg :: (dereferenceable load 4 from %ir.scevgep40 + 32)
; CHECK-NEXT: MOV32mr %stack.1.z, 1, $noreg, 32, $noreg, killed %7 :: (store 4 into %ir.0 + 32, align 16)
; CHECK-NEXT: %8:vr128 = VMOVUPSrm %stack.0.a, 1, $noreg, 40, $noreg :: (dereferenceable load 16 from %ir.scevgep40 + 36, align 4)
; CHECK-NEXT: VMOVUPSmr %stack.1.z, 1, $noreg, 36, $noreg, killed %8 :: (store 16 into %ir.0 + 36)
; CHECK-NEXT: VMOVUPSmr %stack.1.z, 1, $noreg, 36, $noreg, killed %8 :: (store 16 into %ir.0 + 36, align 4, basealign 16)
; CHECK-NEXT: %9:gr64 = MOV64rm %stack.0.a, 1, $noreg, 56, $noreg :: (dereferenceable load 8 from %ir.scevgep40 + 52, align 4)
; CHECK-NEXT: MOV64mr %stack.1.z, 1, $noreg, 52, $noreg, killed %9 :: (store 8 into %ir.0 + 52, align 16)
; CHECK-NEXT: MOV64mr %stack.1.z, 1, $noreg, 52, $noreg, killed %9 :: (store 8 into %ir.0 + 52, align 4, basealign 16)
; CHECK-NEXT: %10:gr32 = MOV32rm %stack.0.a, 1, $noreg, 64, $noreg :: (dereferenceable load 4 from %ir.scevgep40 + 60)
; CHECK-NEXT: MOV32mr %stack.1.z, 1, $noreg, 60, $noreg, killed %10 :: (store 4 into %ir.0 + 60, align 16)
; CHECK-NEXT: MOV32mr %stack.1.z, 1, $noreg, 60, $noreg, killed %10 :: (store 4 into %ir.0 + 60, basealign 16)
%6:vr256 = VMOVUPSYrm %stack.0.a, 1, $noreg, 36, $noreg :: (dereferenceable load 32 from %ir.scevgep40 + 32, align 4)
VMOVUPSYmr %stack.1.z, 1, $noreg, 32, $noreg, killed %6 :: (store 32 into %ir.0 + 32, align 16)
$eax = COPY %0

View File

@ -11,8 +11,8 @@ body: |
bb.0:
; CHECK-LABEL: name: foo
; CHECK: renamable $eax = IMPLICIT_DEF
; CHECK: renamable $edx = MOVZX32rm8 renamable $eax, 1, $noreg, 0, $noreg :: (load 1 from `i168* undef` + 20, align 16)
; CHECK: dead renamable $ecx = MOV32rm renamable $eax, 1, $noreg, 0, $noreg :: (load 4 from `i168* undef` + 12, align 16)
; CHECK: renamable $edx = MOVZX32rm8 renamable $eax, 1, $noreg, 0, $noreg :: (load 1 from `i168* undef` + 20, align 4, basealign 16)
; CHECK: dead renamable $ecx = MOV32rm renamable $eax, 1, $noreg, 0, $noreg :: (load 4 from `i168* undef` + 12, basealign 16)
; CHECK: renamable $al = MOV8rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load 1 from `i32* undef`, align 4)
; CHECK: dead renamable $ecx = COPY renamable $edx
; CHECK: dead renamable $ecx = COPY renamable $edx
@ -26,8 +26,8 @@ body: |
; CHECK: dead renamable $eax = SHRD32rrCL renamable $eax, killed renamable $edx, implicit-def dead $eflags, implicit killed $cl
; CHECK: RETL
%0:gr32 = IMPLICIT_DEF
%1:gr32 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg :: (load 1 from `i168* undef` + 20, align 16)
%2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load 4 from `i168* undef` + 12, align 16)
%1:gr32 = MOVZX32rm8 %0, 1, $noreg, 0, $noreg :: (load 1 from `i168* undef` + 20, align 4, basealign 16)
%2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load 4 from `i168* undef` + 12, basealign 16)
%3:gr8 = MOV8rm %0, 1, $noreg, 0, $noreg :: (load 1 from `i32* undef`, align 4)
%4:gr32 = COPY %1
%5:gr32 = COPY %1