CodeGen: Print/parse LLTs in MachineMemOperands

This will currently accept the old number-of-bytes syntax and convert
it to a scalar type. That fallback should be removed in the near future
(I think I converted all of the tests already, but likely missed a few).

I'm not sure what the exact syntax and policy should be. We can
continue printing the number of bytes for non-generic instructions to
avoid test churn, and only allow non-scalar types for generic
instructions.

This will currently print the LLT in parentheses, while the parser
still accepts the existing integers and implicitly converts them to
scalars. The parentheses are a bit ugly, but the parser logic seems
unable to cope without either parentheses or some keyword to indicate
the start of a type.
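
For illustration, here is the same generic load written both ways (the virtual registers and the %ir.addr name are hypothetical):

    ; Old syntax: memory size as a byte count
    %0:_(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)

    ; New syntax: memory type as an LLT in parentheses
    %0:_(s64) = G_LOAD %1(p0) :: (load (s64) from %ir.addr)

The typed form can also express non-scalar memory types, e.g. (load (<2 x s32>) from %ir.addr) or (load (p0) from %ir.addr), which a plain byte count cannot distinguish from a scalar of the same size.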
Matt Arsenault 2021-05-19 22:25:51 -04:00
parent fc25f6691b
commit cc12b285b6
1162 changed files with 33485 additions and 32782 deletions


@ -3220,18 +3220,34 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
   if (parseOptionalAtomicOrdering(FailureOrder))
     return true;
 
+  LLT MemoryType;
   if (Token.isNot(MIToken::IntegerLiteral) &&
-      Token.isNot(MIToken::kw_unknown_size))
-    return error("expected the size integer literal or 'unknown-size' after "
+      Token.isNot(MIToken::kw_unknown_size) &&
+      Token.isNot(MIToken::lparen))
+    return error("expected memory LLT, the size integer literal or 'unknown-size' after "
                  "memory operation");
-  uint64_t Size;
+  uint64_t Size = MemoryLocation::UnknownSize;
   if (Token.is(MIToken::IntegerLiteral)) {
     if (getUint64(Size))
       return true;
+
+    // Convert from bytes to bits for storage.
+    MemoryType = LLT::scalar(8 * Size);
+    lex();
   } else if (Token.is(MIToken::kw_unknown_size)) {
     Size = MemoryLocation::UnknownSize;
+    lex();
+  } else {
+    if (expectAndConsume(MIToken::lparen))
+      return true;
+    if (parseLowLevelType(Token.location(), MemoryType))
+      return true;
+    if (expectAndConsume(MIToken::rparen))
+      return true;
+
+    Size = MemoryType.getSizeInBytes();
   }
-  lex();
 
   MachinePointerInfo Ptr = MachinePointerInfo();
   if (Token.is(MIToken::Identifier)) {
@ -3247,7 +3263,8 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
     if (parseMachinePointerInfo(Ptr))
       return true;
   }
-  unsigned BaseAlignment = (Size != MemoryLocation::UnknownSize ? Size : 1);
+  unsigned BaseAlignment =
+      (Size != MemoryLocation::UnknownSize ? PowerOf2Ceil(Size) : 1);
   AAMDNodes AAInfo;
   MDNode *Range = nullptr;
   while (consumeIfPresent(MIToken::comma)) {
@ -3294,8 +3311,8 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
   }
   if (expectAndConsume(MIToken::rparen))
     return true;
-  Dest = MF.getMachineMemOperand(Ptr, Flags, Size, Align(BaseAlignment), AAInfo,
-                                 Range, SSID, Order, FailureOrder);
+  Dest = MF.getMachineMemOperand(Ptr, Flags, MemoryType, Align(BaseAlignment),
+                                 AAInfo, Range, SSID, Order, FailureOrder);
   return false;
 }
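
A minimal sketch of the operand forms the updated parser accepts (the %ir.x operand is hypothetical):

    (load 4 from %ir.x)             ; legacy byte count, converted to LLT s32
    (load (s32) from %ir.x)         ; parenthesized LLT, parsed directly
    (store (<2 x s64>) into %ir.x)  ; vector and pointer types now round-trip
    (load unknown-size from %ir.x)  ; still accepted when the size is unknown

Note that the base-alignment fallback also changes: when no explicit alignment is given, it is now PowerOf2Ceil(Size) rather than the raw size, so a non-power-of-two size (such as the 12-byte store of a <3 x s32>) still yields a valid power-of-two Align.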


@ -1121,10 +1121,10 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
   if (getFailureOrdering() != AtomicOrdering::NotAtomic)
     OS << toIRString(getFailureOrdering()) << ' ';
 
-  if (getSize() == MemoryLocation::UnknownSize)
-    OS << "unknown-size";
+  if (getMemoryType().isValid())
+    OS << '(' << getMemoryType() << ')';
   else
-    OS << getSize();
+    OS << "unknown-size";
 
   if (const Value *Val = getValue()) {
     OS << ((isLoad() && isStore()) ? " on " : isLoad() ? " from " : " into ");
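
On the printing side, any MachineMemOperand with a valid memory type is now rendered with the type in parentheses, and unknown-size appears only when no valid type is recorded. For example (operands made up, matching the tests below):

    (store (s32) into %ir.addr)
    (dereferenceable invariant load (p0) from @__stack_chk_guard)
    (store unknown-size into %ir.addr)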


@ -25,22 +25,22 @@ define void @test_varargs() {
; CHECK: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C8]](s64)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[ANYEXT]](s32)
; CHECK: G_STORE [[ANYEXT1]](s64), [[PTR_ADD]](p0) :: (store 8 into stack, align 1)
; CHECK: G_STORE [[ANYEXT1]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 1)
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[C4]](s16)
; CHECK: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C9]](s64)
; CHECK: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[ANYEXT2]](s32)
; CHECK: G_STORE [[ANYEXT3]](s64), [[PTR_ADD1]](p0) :: (store 8 into stack + 8, align 1)
; CHECK: G_STORE [[ANYEXT3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into stack + 8, align 1)
; CHECK: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C10]](s64)
; CHECK: [[ANYEXT4:%[0-9]+]]:_(s64) = G_ANYEXT [[C5]](s32)
; CHECK: G_STORE [[ANYEXT4]](s64), [[PTR_ADD2]](p0) :: (store 8 into stack + 16, align 1)
; CHECK: G_STORE [[ANYEXT4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into stack + 16, align 1)
; CHECK: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C11]](s64)
; CHECK: G_STORE [[C6]](s32), [[PTR_ADD3]](p0) :: (store 4 into stack + 24, align 1)
; CHECK: G_STORE [[C6]](s32), [[PTR_ADD3]](p0) :: (store (s32) into stack + 24, align 1)
; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64)
; CHECK: G_STORE [[C7]](s64), [[PTR_ADD4]](p0) :: (store 8 into stack + 32, align 1)
; CHECK: G_STORE [[C7]](s64), [[PTR_ADD4]](p0) :: (store (s64) into stack + 32, align 1)
; CHECK: BL @varargs, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $d0, implicit $x1
; CHECK: ADJCALLSTACKUP 40, 0, implicit-def $sp, implicit $sp
; CHECK: RET_ReallyLR
@ -79,16 +79,16 @@ define i32 @i8i16caller() nounwind readnone {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64)
; CHECK: G_STORE [[C8]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
; CHECK: G_STORE [[C8]](s8), [[PTR_ADD]](p0) :: (store (s8) into stack)
; CHECK: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C13]](s64)
; CHECK: G_STORE [[C9]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 2, align 1)
; CHECK: G_STORE [[C9]](s16), [[PTR_ADD1]](p0) :: (store (s16) into stack + 2, align 1)
; CHECK: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C14]](s64)
; CHECK: G_STORE [[C10]](s8), [[PTR_ADD2]](p0) :: (store 1 into stack + 4)
; CHECK: G_STORE [[C10]](s8), [[PTR_ADD2]](p0) :: (store (s8) into stack + 4)
; CHECK: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C15]](s64)
; CHECK: G_STORE [[C11]](s8), [[PTR_ADD3]](p0) :: (store 1 into stack + 5)
; CHECK: G_STORE [[C11]](s8), [[PTR_ADD3]](p0) :: (store (s8) into stack + 5)
; CHECK: BL @i8i16callee, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $w3, implicit $w4, implicit $x5, implicit $x6, implicit $x7, implicit-def $x0
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x0
; CHECK: ADJCALLSTACKUP 6, 0, implicit-def $sp, implicit $sp


@ -121,7 +121,7 @@ define void @test_stack_ext_needed() {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
; CHECK: G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store (s8) into stack)
; CHECK: BL @stack_ext_needed, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7
; CHECK: ADJCALLSTACKUP 8, 0, implicit-def $sp, implicit $sp
; CHECK: RET_ReallyLR
@ -141,7 +141,7 @@ define void @callee_s128(i128 %a, i128 %b, i128 *%ptr) {
; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
; CHECK: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY $x4
; CHECK: G_STORE [[MV1]](s128), [[COPY4]](p0) :: (store 16 into %ir.ptr)
; CHECK: G_STORE [[MV1]](s128), [[COPY4]](p0) :: (store (s128) into %ir.ptr)
; CHECK: RET_ReallyLR
store i128 %b, i128 *%ptr
ret void
@ -153,7 +153,7 @@ define void @caller_s128(i128 *%ptr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128) from %ir.ptr)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](s128)
; CHECK: $x0 = COPY [[UV]](s64)
@ -202,16 +202,16 @@ define i32 @i8i16caller() nounwind readnone {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C12]](s64)
; CHECK: G_STORE [[C8]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
; CHECK: G_STORE [[C8]](s8), [[PTR_ADD]](p0) :: (store (s8) into stack)
; CHECK: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C13]](s64)
; CHECK: G_STORE [[C9]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 8, align 1)
; CHECK: G_STORE [[C9]](s16), [[PTR_ADD1]](p0) :: (store (s16) into stack + 8, align 1)
; CHECK: [[C14:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C14]](s64)
; CHECK: G_STORE [[C10]](s8), [[PTR_ADD2]](p0) :: (store 1 into stack + 16)
; CHECK: G_STORE [[C10]](s8), [[PTR_ADD2]](p0) :: (store (s8) into stack + 16)
; CHECK: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C15]](s64)
; CHECK: G_STORE [[C11]](s8), [[PTR_ADD3]](p0) :: (store 1 into stack + 24)
; CHECK: G_STORE [[C11]](s8), [[PTR_ADD3]](p0) :: (store (s8) into stack + 24)
; CHECK: BL @i8i16callee, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $w3, implicit $w4, implicit $x5, implicit $x6, implicit $x7, implicit-def $x0
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x0
; CHECK: ADJCALLSTACKUP 32, 0, implicit-def $sp, implicit $sp
@ -230,7 +230,7 @@ define void @arg_v2i64(<2 x i64> %arg) {
; CHECK: liveins: $q0
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; CHECK: G_STORE [[COPY]](<2 x s64>), [[DEF]](p0) :: (store 16 into `<2 x i64>* undef`)
; CHECK: G_STORE [[COPY]](<2 x s64>), [[DEF]](p0) :: (store (<2 x s64>) into `<2 x i64>* undef`)
; CHECK: RET_ReallyLR
store <2 x i64> %arg, <2 x i64>* undef
ret void
@ -246,7 +246,7 @@ define void @arg_v8i64(<8 x i64> %arg) {
; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s64>) = COPY $q3
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s64>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>), [[COPY2]](<2 x s64>), [[COPY3]](<2 x s64>)
; CHECK: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; CHECK: G_STORE [[CONCAT_VECTORS]](<8 x s64>), [[DEF]](p0) :: (store 64 into `<8 x i64>* undef`)
; CHECK: G_STORE [[CONCAT_VECTORS]](<8 x s64>), [[DEF]](p0) :: (store (<8 x s64>) into `<8 x i64>* undef`)
; CHECK: RET_ReallyLR
store <8 x i64> %arg, <8 x i64>* undef
ret void
@ -259,7 +259,7 @@ define void @arg_v4f32(<4 x float> %arg) {
; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK: [[BITCAST:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY]](<2 x s64>)
; CHECK: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; CHECK: G_STORE [[BITCAST]](<4 x s32>), [[DEF]](p0) :: (store 16 into `<4 x float>* undef`)
; CHECK: G_STORE [[BITCAST]](<4 x s32>), [[DEF]](p0) :: (store (<4 x s32>) into `<4 x float>* undef`)
; CHECK: RET_ReallyLR
store <4 x float> %arg, <4 x float>* undef
ret void
@ -279,7 +279,7 @@ define void @ret_arg_v16f32(<16 x float> %arg) {
; CHECK: [[BITCAST3:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[COPY3]](<2 x s64>)
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[BITCAST]](<4 x s32>), [[BITCAST1]](<4 x s32>), [[BITCAST2]](<4 x s32>), [[BITCAST3]](<4 x s32>)
; CHECK: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; CHECK: G_STORE [[CONCAT_VECTORS]](<16 x s32>), [[DEF]](p0) :: (store 64 into `<16 x float>* undef`)
; CHECK: G_STORE [[CONCAT_VECTORS]](<16 x s32>), [[DEF]](p0) :: (store (<16 x s32>) into `<16 x float>* undef`)
; CHECK: RET_ReallyLR
store <16 x float> %arg, <16 x float>* undef
ret void


@ -32,7 +32,7 @@ define i128 @ABIi128(i128 %arg1) {
ret i128 %res
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(<3 x s32>), %4:_(p0) :: (store 12 into %ir.addr + 16, align 16, basealign 32) (in function: odd_vector)
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(<3 x s32>), %4:_(p0) :: (store (s96) into %ir.addr + 16, align 16, basealign 32) (in function: odd_vector)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
define void @odd_vector(<7 x i32>* %addr) {


@ -15,12 +15,12 @@ define i32 @cse_gep([4 x i32]* %ptr, i32 %idx) {
; O0: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
; O0: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; O0: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
; O0: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.gep1)
; O0: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1)
; O0: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
; O0: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64)
; O0: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; O0: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C1]](s64)
; O0: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load 4 from %ir.gep2)
; O0: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.gep2)
; O0: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[LOAD1]]
; O0: $w0 = COPY [[ADD]](s32)
; O0: RET_ReallyLR implicit $w0
@ -34,10 +34,10 @@ define i32 @cse_gep([4 x i32]* %ptr, i32 %idx) {
; O3: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
; O3: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; O3: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
; O3: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.gep1)
; O3: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p0) :: (load (s32) from %ir.gep1)
; O3: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; O3: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
; O3: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load 4 from %ir.gep2)
; O3: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD1]](p0) :: (load (s32) from %ir.gep2)
; O3: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD1]], [[LOAD1]]
; O3: $w0 = COPY [[ADD]](s32)
; O3: RET_ReallyLR implicit $w0


@ -11,8 +11,8 @@
; CHECK-NOT: id: 1
; CHECK: [[GUARD_SLOT:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.StackGuardSlot
; CHECK: [[GUARD:%[0-9]+]]:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
; CHECK: G_STORE [[GUARD]](p0), [[GUARD_SLOT]](p0) :: (volatile store 8 into %stack.0.StackGuardSlot)
; CHECK: [[GUARD:%[0-9]+]]:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load (p0) from @__stack_chk_guard)
; CHECK: G_STORE [[GUARD]](p0), [[GUARD_SLOT]](p0) :: (volatile store (p0) into %stack.0.StackGuardSlot)
declare void @llvm.stackprotector(i8*, i8**)
define void @test_stack_guard_remat2() {
%StackGuardSlot = alloca i8*


@ -784,7 +784,7 @@ define void @jt_multiple_jump_tables(%1* %arg, i32 %arg1, i32* %arg2) {
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[MUL]](s64)
; CHECK: [[C112:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C112]](s64)
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD1]](p0) :: (load 8 from %ir.tmp59)
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[PTR_ADD1]](p0) :: (load (p0) from %ir.tmp59)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: $x0 = COPY [[COPY]](p0)
; CHECK: $x1 = COPY [[LOAD]](p0)
@ -1106,7 +1106,7 @@ define void @jt_2_tables_phi_edge_from_second() {
; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; CHECK: [[C21:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load 4 from `i32* undef`, align 8)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 8)
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LOAD]](s32), [[C]]
; CHECK: G_BRCOND [[ICMP]](s1), %bb.6
; CHECK: G_BR %bb.19


@ -346,12 +346,12 @@ define void @trunc(i64 %a) {
; CHECK-LABEL: name: load
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
; CHECK: [[VAL1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, align 16)
; CHECK: [[VAL2:%[0-9]+]]:_(s64) = G_LOAD [[ADDR42]](p42) :: (load 8 from %ir.addr42, addrspace 42)
; CHECK: [[VAL1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load (s64) from %ir.addr, align 16)
; CHECK: [[VAL2:%[0-9]+]]:_(s64) = G_LOAD [[ADDR42]](p42) :: (load (s64) from %ir.addr42, addrspace 42)
; CHECK: [[SUM2:%.*]]:_(s64) = G_ADD [[VAL1]], [[VAL2]]
; CHECK: [[VAL3:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (volatile load 8 from %ir.addr)
; CHECK: [[VAL3:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (volatile load (s64) from %ir.addr)
; CHECK: [[SUM3:%[0-9]+]]:_(s64) = G_ADD [[SUM2]], [[VAL3]]
; CHECK: [[VAL4:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, !range !0)
; CHECK: [[VAL4:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load (s64) from %ir.addr, !range !0)
; CHECK: [[SUM4:%[0-9]+]]:_(s64) = G_ADD [[SUM3]], [[VAL4]]
; CHECK: $x0 = COPY [[SUM4]]
; CHECK: RET_ReallyLR implicit $x0
@ -374,9 +374,9 @@ define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY $x1
; CHECK: [[VAL1:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: [[VAL2:%[0-9]+]]:_(s64) = COPY $x3
; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr, align 16)
; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store 8 into %ir.addr42, addrspace 42)
; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store 8 into %ir.addr)
; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (store (s64) into %ir.addr, align 16)
; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store (s64) into %ir.addr42, addrspace 42)
; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store (s64) into %ir.addr)
; CHECK: RET_ReallyLR
define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2) {
store i64 %val1, i64* %addr, align 16
@ -602,13 +602,13 @@ define i8* @test_constant_null() {
; CHECK-LABEL: name: test_struct_memops
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[VAL1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[VAL1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.addr + 4)
; CHECK: G_STORE [[VAL1]](s8), [[ADDR]](p0) :: (store 1 into %ir.addr, align 4)
; CHECK: [[VAL2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load (s32) from %ir.addr + 4)
; CHECK: G_STORE [[VAL1]](s8), [[ADDR]](p0) :: (store (s8) into %ir.addr, align 4)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: G_STORE [[VAL2]](s32), [[GEP2]](p0) :: (store 4 into %ir.addr + 4)
; CHECK: G_STORE [[VAL2]](s32), [[GEP2]](p0) :: (store (s32) into %ir.addr + 4)
define void @test_struct_memops({ i8, i32 }* %addr) {
%val = load { i8, i32 }, { i8, i32 }* %addr
store { i8, i32 } %val, { i8, i32 }* %addr
@ -617,8 +617,8 @@ define void @test_struct_memops({ i8, i32 }* %addr) {
; CHECK-LABEL: name: test_i1_memops
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[VAL:%[0-9]+]]:_(s1) = G_LOAD [[ADDR]](p0) :: (load 1 from %ir.addr)
; CHECK: G_STORE [[VAL]](s1), [[ADDR]](p0) :: (store 1 into %ir.addr)
; CHECK: [[VAL:%[0-9]+]]:_(s1) = G_LOAD [[ADDR]](p0) :: (load (s1) from %ir.addr)
; CHECK: G_STORE [[VAL]](s1), [[ADDR]](p0) :: (store (s1) into %ir.addr)
define void @test_i1_memops(i1* %addr) {
%val = load i1, i1* %addr
store i1 %val, i1* %addr
@ -709,10 +709,10 @@ define float @test_frem(float %arg1, float %arg2) {
; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SADDO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
%res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %lhs, i32 %rhs)
@ -725,10 +725,10 @@ define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UADDO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32)
define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
%res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 %rhs)
@ -741,10 +741,10 @@ define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SSUBO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.subr)
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.subr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.subr + 4, align 4)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4)
declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
%res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %lhs, i32 %rhs)
@ -757,10 +757,10 @@ define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_USUBO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.subr)
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.subr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.subr + 4, align 4)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.subr + 4, align 4)
declare { i32, i1 } @llvm.usub.with.overflow.i32(i32, i32)
define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
%res = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %lhs, i32 %rhs)
@ -773,10 +773,10 @@ define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_SMULO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
%res = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %lhs, i32 %rhs)
@ -789,10 +789,10 @@ define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[RHS:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
; CHECK: [[VAL:%[0-9]+]]:_(s32), [[OVERFLOW:%[0-9]+]]:_(s1) = G_UMULO [[LHS]], [[RHS]]
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store 4 into %ir.addr)
; CHECK: G_STORE [[VAL]](s32), [[ADDR]](p0) :: (store (s32) into %ir.addr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store 1 into %ir.addr + 4, align 4)
; CHECK: G_STORE [[OVERFLOW]](s1), [[GEP]](p0) :: (store (s1) into %ir.addr + 4, align 4)
declare { i32, i1 } @llvm.umul.with.overflow.i32(i32, i32)
define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
%res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %lhs, i32 %rhs)
@ -802,16 +802,16 @@ define void @test_umul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK-LABEL: name: test_extractvalue
; CHECK: %0:_(p0) = COPY $x0
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
; CHECK: $w0 = COPY [[LD3]](s32)
%struct.nested = type {i8, { i8, i32 }, i32}
define i32 @test_extractvalue(%struct.nested* %addr) {
@ -823,19 +823,19 @@ define i32 @test_extractvalue(%struct.nested* %addr) {
; CHECK-LABEL: name: test_extractvalue_agg
; CHECK: %0:_(p0) = COPY $x0
; CHECK: %1:_(p0) = COPY $x1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
; CHECK: G_STORE [[LD2]](s8), %1(p0) :: (store 1 into %ir.addr2, align 4)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
; CHECK: G_STORE [[LD2]](s8), %1(p0) :: (store (s8) into %ir.addr2, align 4)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
; CHECK: G_STORE [[LD3]](s32), [[GEP4]](p0) :: (store 4 into %ir.addr2 + 4)
; CHECK: G_STORE [[LD3]](s32), [[GEP4]](p0) :: (store (s32) into %ir.addr2 + 4)
define void @test_extractvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
%struct = load %struct.nested, %struct.nested* %addr
%res = extractvalue %struct.nested %struct, 1
@ -857,23 +857,23 @@ define void @test_trivial_extract_ptr([1 x i8*] %s, i8 %val) {
; CHECK-LABEL: name: test_insertvalue
; CHECK: %0:_(p0) = COPY $x0
; CHECK: %1:_(s32) = COPY $w1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load 1 from %ir.addr + 4, align 4)
; CHECK: [[LD2:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p0) :: (load (s8) from %ir.addr + 4, align 4)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 8)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 8)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 12)
; CHECK: G_STORE [[LD1]](s8), %0(p0) :: (store 1 into %ir.addr, align 4)
; CHECK: [[LD4:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 12)
; CHECK: G_STORE [[LD1]](s8), %0(p0) :: (store (s8) into %ir.addr, align 4)
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: G_STORE [[LD2]](s8), [[GEP4]](p0) :: (store 1 into %ir.addr + 4, align 4)
; CHECK: G_STORE [[LD2]](s8), [[GEP4]](p0) :: (store (s8) into %ir.addr + 4, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: G_STORE %1(s32), [[GEP5]](p0) :: (store 4 into %ir.addr + 8)
; CHECK: G_STORE %1(s32), [[GEP5]](p0) :: (store (s32) into %ir.addr + 8)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: G_STORE [[LD4]](s32), [[GEP6]](p0) :: (store 4 into %ir.addr + 12)
; CHECK: G_STORE [[LD4]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 12)
define void @test_insertvalue(%struct.nested* %addr, i32 %val) {
%struct = load %struct.nested, %struct.nested* %addr
%newstruct = insertvalue %struct.nested %struct, i32 %val, 1, 1
@ -902,26 +902,26 @@ define [1 x i8*] @test_trivial_insert_ptr([1 x i8*] %s, i8* %val) {
; CHECK-LABEL: name: test_insertvalue_agg
; CHECK: %0:_(p0) = COPY $x0
; CHECK: %1:_(p0) = COPY $x1
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %1(p0) :: (load 1 from %ir.addr2, align 4)
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD %1(p0) :: (load (s8) from %ir.addr2, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %1, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.addr2 + 4)
; CHECK: [[LD3:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr, align 4)
; CHECK: [[LD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load (s32) from %ir.addr2 + 4)
; CHECK: [[LD3:%[0-9]+]]:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr, align 4)
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[GEP2]](p0) :: (load 1 from %ir.addr + 4, align 4)
; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[GEP2]](p0) :: (load (s8) from %ir.addr + 4, align 4)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load 4 from %ir.addr + 8)
; CHECK: [[LD5:%[0-9]+]]:_(s32) = G_LOAD [[GEP3]](p0) :: (load (s32) from %ir.addr + 8)
; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.addr + 12)
; CHECK: G_STORE [[LD3]](s8), %0(p0) :: (store 1 into %ir.addr, align 4)
; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load (s32) from %ir.addr + 12)
; CHECK: G_STORE [[LD3]](s8), %0(p0) :: (store (s8) into %ir.addr, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: G_STORE [[LD1]](s8), [[GEP5]](p0) :: (store 1 into %ir.addr + 4, align 4)
; CHECK: G_STORE [[LD1]](s8), [[GEP5]](p0) :: (store (s8) into %ir.addr + 4, align 4)
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: G_STORE [[LD2]](s32), [[GEP6]](p0) :: (store 4 into %ir.addr + 8)
; CHECK: G_STORE [[LD2]](s32), [[GEP6]](p0) :: (store (s32) into %ir.addr + 8)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST4]](s64)
; CHECK: G_STORE [[LD6]](s32), [[GEP7]](p0) :: (store 4 into %ir.addr + 12)
; CHECK: G_STORE [[LD6]](s32), [[GEP7]](p0) :: (store (s32) into %ir.addr + 12)
define void @test_insertvalue_agg(%struct.nested* %addr, {i8, i32}* %addr2) {
%smallstruct = load {i8, i32}, {i8, i32}* %addr2
%struct = load %struct.nested, %struct.nested* %addr
@ -1138,7 +1138,7 @@ define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
ret void
}
@ -1148,7 +1148,7 @@ define void @test_memcpy_tail(i8* %dst, i8* %src, i64 %size) {
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 1 :: (store 1 into %ir.dst), (load 1 from %ir.src)
; CHECK: G_MEMCPY [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 1 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
ret void
}
@ -1159,7 +1159,7 @@ define void @test_memcpy_nonzero_as(i8 addrspace(1)* %dst, i8 addrspace(1) * %sr
; CHECK: [[DST:%[0-9]+]]:_(p1) = COPY $x0
; CHECK: [[SRC:%[0-9]+]]:_(p1) = COPY $x1
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: G_MEMCPY [[DST]](p1), [[SRC]](p1), [[SIZE]](s64), 0 :: (store 1 into %ir.dst, addrspace 1), (load 1 from %ir.src, addrspace 1)
; CHECK: G_MEMCPY [[DST]](p1), [[SRC]](p1), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 1)
call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %size, i1 0)
ret void
}
@ -1170,7 +1170,7 @@ define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: G_MEMMOVE [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
; CHECK: G_MEMMOVE [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
ret void
}
@ -1182,7 +1182,7 @@ define void @test_memset(i8* %dst, i8 %val, i64 %size) {
; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: [[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]]
; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: G_MEMSET [[DST]](p0), [[SRC]](s8), [[SIZE]](s64), 0 :: (store 1 into %ir.dst)
; CHECK: G_MEMSET [[DST]](p0), [[SRC]](s8), [[SIZE]](s64), 0 :: (store (s8) into %ir.dst)
call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
ret void
}
@ -1464,12 +1464,12 @@ define void @test_lifetime_intrin() {
define void @test_load_store_atomics(i8* %addr) {
; CHECK-LABEL: name: test_load_store_atomics
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered 1 from %ir.addr)
; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic 1 into %ir.addr)
; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire 1 from %ir.addr)
; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release 1 into %ir.addr)
; CHECK: [[V2:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst 1 from %ir.addr)
; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic 1 into %ir.addr)
; CHECK: [[V0:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load unordered (s8) from %ir.addr)
; CHECK: G_STORE [[V0]](s8), [[ADDR]](p0) :: (store monotonic (s8) into %ir.addr)
; CHECK: [[V1:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load acquire (s8) from %ir.addr)
; CHECK: G_STORE [[V1]](s8), [[ADDR]](p0) :: (store release (s8) into %ir.addr)
; CHECK: [[V2:%[0-9]+]]:_(s8) = G_LOAD [[ADDR]](p0) :: (load syncscope("singlethread") seq_cst (s8) from %ir.addr)
; CHECK: G_STORE [[V2]](s8), [[ADDR]](p0) :: (store syncscope("singlethread") monotonic (s8) into %ir.addr)
%v0 = load atomic i8, i8* %addr unordered, align 1
store atomic i8 %v0, i8* %addr monotonic, align 1
@ -1778,7 +1778,7 @@ define <4 x half> @test_constant_vector() {
define i32 @test_target_mem_intrinsic(i32* %addr) {
; CHECK-LABEL: name: test_target_mem_intrinsic
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load 4 from %ir.addr)
; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load (s32) from %ir.addr)
; CHECK: G_TRUNC [[VAL]](s64)
%val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
%trunc = trunc i64 %val to i32
@ -1850,33 +1850,33 @@ define void @test_phi_diamond({ i8, i16, i32 }* %a.ptr, { i8, i16, i32 }* %b.ptr
; CHECK: G_BRCOND [[TRUNC]](s1), %bb.2
; CHECK: G_BR %bb.3
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD [[ARG1]](p0) :: (load 1 from %ir.a.ptr, align 4)
; CHECK: [[LD1:%[0-9]+]]:_(s8) = G_LOAD [[ARG1]](p0) :: (load (s8) from %ir.a.ptr, align 4)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG1]], [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p0) :: (load 2 from %ir.a.ptr + 2)
; CHECK: [[LD2:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p0) :: (load (s16) from %ir.a.ptr + 2)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG1]], [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.a.ptr + 4)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.a.ptr + 4)
; CHECK: G_BR %bb.4
; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[ARG2]](p0) :: (load 1 from %ir.b.ptr, align 4)
; CHECK: [[LD4:%[0-9]+]]:_(s8) = G_LOAD [[ARG2]](p0) :: (load (s8) from %ir.b.ptr, align 4)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG2]], [[CST3]](s64)
; CHECK: [[LD5:%[0-9]+]]:_(s16) = G_LOAD [[GEP3]](p0) :: (load 2 from %ir.b.ptr + 2)
; CHECK: [[LD5:%[0-9]+]]:_(s16) = G_LOAD [[GEP3]](p0) :: (load (s16) from %ir.b.ptr + 2)
; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG2]], [[CST4]](s64)
; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.b.ptr + 4)
; CHECK: [[LD6:%[0-9]+]]:_(s32) = G_LOAD [[GEP4]](p0) :: (load (s32) from %ir.b.ptr + 4)
; CHECK: [[PN1:%[0-9]+]]:_(s8) = G_PHI [[LD1]](s8), %bb.2, [[LD4]](s8), %bb.3
; CHECK: [[PN2:%[0-9]+]]:_(s16) = G_PHI [[LD2]](s16), %bb.2, [[LD5]](s16), %bb.3
; CHECK: [[PN3:%[0-9]+]]:_(s32) = G_PHI [[LD3]](s32), %bb.2, [[LD6]](s32), %bb.3
; CHECK: G_STORE [[PN1]](s8), [[ARG4]](p0) :: (store 1 into %ir.dst, align 4)
; CHECK: G_STORE [[PN1]](s8), [[ARG4]](p0) :: (store (s8) into %ir.dst, align 4)
; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG4]], [[CST5]](s64)
; CHECK: G_STORE [[PN2]](s16), [[GEP5]](p0) :: (store 2 into %ir.dst + 2)
; CHECK: G_STORE [[PN2]](s16), [[GEP5]](p0) :: (store (s16) into %ir.dst + 2)
; CHECK: [[CST6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[ARG4]], [[CST6]](s64)
; CHECK: G_STORE [[PN3]](s32), [[GEP6]](p0) :: (store 4 into %ir.dst + 4)
; CHECK: G_STORE [[PN3]](s32), [[GEP6]](p0) :: (store (s32) into %ir.dst + 4)
; CHECK: RET_ReallyLR
entry:
@ -1909,25 +1909,25 @@ define void @test_nested_aggregate_const(%agg.nested *%ptr) {
; CHECK: [[CST4:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
; CHECK: [[CST5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[CST6:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
; CHECK: G_STORE [[CST1]](s32), [[BASE]](p0) :: (store 4 into %ir.ptr, align 8)
; CHECK: G_STORE [[CST1]](s32), [[BASE]](p0) :: (store (s32) into %ir.ptr, align 8)
; CHECK: [[CST7:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST7]](s64)
; CHECK: G_STORE [[CST1]](s32), [[GEP1]](p0) :: (store 4 into %ir.ptr + 4)
; CHECK: G_STORE [[CST1]](s32), [[GEP1]](p0) :: (store (s32) into %ir.ptr + 4)
; CHECK: [[CST8:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST8]](s64)
; CHECK: G_STORE [[CST2]](s16), [[GEP2]](p0) :: (store 2 into %ir.ptr + 8, align 8)
; CHECK: G_STORE [[CST2]](s16), [[GEP2]](p0) :: (store (s16) into %ir.ptr + 8, align 8)
; CHECK: [[CST9:%[0-9]+]]:_(s64) = G_CONSTANT i64 10
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST9]](s64)
; CHECK: G_STORE [[CST3]](s8), [[GEP3]](p0) :: (store 1 into %ir.ptr + 10, align 2)
; CHECK: G_STORE [[CST3]](s8), [[GEP3]](p0) :: (store (s8) into %ir.ptr + 10, align 2)
; CHECK: [[CST10:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST10]](s64)
; CHECK: G_STORE [[CST4]](s64), [[GEP4]](p0) :: (store 8 into %ir.ptr + 16)
; CHECK: G_STORE [[CST4]](s64), [[GEP4]](p0) :: (store (s64) into %ir.ptr + 16)
; CHECK: [[CST11:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST11]](s64)
; CHECK: G_STORE [[CST5]](s64), [[GEP5]](p0) :: (store 8 into %ir.ptr + 24)
; CHECK: G_STORE [[CST5]](s64), [[GEP5]](p0) :: (store (s64) into %ir.ptr + 24)
; CHECK: [[CST12:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[BASE]], [[CST12]](s64)
; CHECK: G_STORE [[CST6]](s32), [[GEP6]](p0) :: (store 4 into %ir.ptr + 32, align 8)
; CHECK: G_STORE [[CST6]](s32), [[GEP6]](p0) :: (store (s32) into %ir.ptr + 32, align 8)
store %agg.nested { i32 1, i32 1, %agg.inner { i16 2, i8 3, %agg.inner.inner {i64 5, i64 8} }, i32 13}, %agg.nested *%ptr
ret void
}
@ -1954,7 +1954,7 @@ define i32 @test_atomic_cmpxchg_1(i32* %addr) {
; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: bb.2.repeat:
; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
; CHECK: [[OLDVALRES:%[0-9]+]]:_(s32), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store monotonic monotonic 4 on %ir.addr)
; CHECK: [[OLDVALRES:%[0-9]+]]:_(s32), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store monotonic monotonic (s32) on %ir.addr)
; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
; CHECK-NEXT: G_BR %bb.2
; CHECK: bb.3.done:
@ -1980,7 +1980,7 @@ define i32 @test_weak_atomic_cmpxchg_1(i32* %addr) {
; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: bb.2.repeat:
; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
; CHECK: [[OLDVALRES:%[0-9]+]]:_(s32), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store monotonic monotonic 4 on %ir.addr)
; CHECK: [[OLDVALRES:%[0-9]+]]:_(s32), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store monotonic monotonic (s32) on %ir.addr)
; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
; CHECK-NEXT: G_BR %bb.2
; CHECK: bb.3.done:
@ -2006,7 +2006,7 @@ define i16 @test_atomic_cmpxchg_2(i16* %addr) {
; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; CHECK: bb.2.repeat:
; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
; CHECK: [[OLDVALRES:%[0-9]+]]:_(s16), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store seq_cst seq_cst 2 on %ir.addr)
; CHECK: [[OLDVALRES:%[0-9]+]]:_(s16), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store seq_cst seq_cst (s16) on %ir.addr)
; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
; CHECK-NEXT: G_BR %bb.2
; CHECK: bb.3.done:
@ -2032,7 +2032,7 @@ define i64 @test_atomic_cmpxchg_3(i64* %addr) {
; CHECK-NEXT: [[NEWVAL:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: bb.2.repeat:
; CHECK-NEXT: successors: %bb.3({{[^)]+}}), %bb.2({{[^)]+}})
; CHECK: [[OLDVALRES:%[0-9]+]]:_(s64), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store seq_cst acquire 8 on %ir.addr)
; CHECK: [[OLDVALRES:%[0-9]+]]:_(s64), [[SUCCESS:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[ADDR]](p0), [[OLDVAL]], [[NEWVAL]] :: (load store seq_cst acquire (s64) on %ir.addr)
; CHECK-NEXT: G_BRCOND [[SUCCESS]](s1), %bb.3
; CHECK-NEXT: G_BR %bb.2
; CHECK: bb.3.done:
@ -2055,7 +2055,7 @@ define i32 @test_atomicrmw_xchg(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XCHG [[ADDR]](p0), [[VAL]] :: (load store monotonic 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XCHG [[ADDR]](p0), [[VAL]] :: (load store monotonic (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw xchg i256* %addr, i256 1 monotonic
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2072,7 +2072,7 @@ define i32 @test_atomicrmw_add(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_ADD [[ADDR]](p0), [[VAL]] :: (load store acquire 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_ADD [[ADDR]](p0), [[VAL]] :: (load store acquire (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw add i256* %addr, i256 1 acquire
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2089,7 +2089,7 @@ define i32 @test_atomicrmw_sub(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_SUB [[ADDR]](p0), [[VAL]] :: (load store release 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_SUB [[ADDR]](p0), [[VAL]] :: (load store release (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw sub i256* %addr, i256 1 release
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2106,7 +2106,7 @@ define i32 @test_atomicrmw_and(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_AND [[ADDR]](p0), [[VAL]] :: (load store acq_rel 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_AND [[ADDR]](p0), [[VAL]] :: (load store acq_rel (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw and i256* %addr, i256 1 acq_rel
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2123,7 +2123,7 @@ define i32 @test_atomicrmw_nand(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_NAND [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_NAND [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw nand i256* %addr, i256 1 seq_cst
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2140,7 +2140,7 @@ define i32 @test_atomicrmw_or(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_OR [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_OR [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw or i256* %addr, i256 1 seq_cst
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2157,7 +2157,7 @@ define i32 @test_atomicrmw_xor(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XOR [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_XOR [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw xor i256* %addr, i256 1 seq_cst
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2174,7 +2174,7 @@ define i32 @test_atomicrmw_min(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw min i256* %addr, i256 1 seq_cst
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2191,7 +2191,7 @@ define i32 @test_atomicrmw_max(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_MAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw max i256* %addr, i256 1 seq_cst
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2208,7 +2208,7 @@ define i32 @test_atomicrmw_umin(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMIN [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw umin i256* %addr, i256 1 seq_cst
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2225,7 +2225,7 @@ define i32 @test_atomicrmw_umax(i256* %addr) {
; CHECK-NEXT: liveins: $x0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[VAL:%[0-9]+]]:_(s256) = G_CONSTANT i256 1
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst 32 on %ir.addr)
; CHECK-NEXT: [[OLDVALRES:%[0-9]+]]:_(s256) = G_ATOMICRMW_UMAX [[ADDR]](p0), [[VAL]] :: (load store seq_cst (s256) on %ir.addr)
; CHECK-NEXT: [[RES:%[0-9]+]]:_(s32) = G_TRUNC [[OLDVALRES]]
%oldval = atomicrmw umax i256* %addr, i256 1 seq_cst
; FIXME: We currently can't lower 'ret i256' and it's not the purpose of this
@ -2342,7 +2342,7 @@ define float @test_nearbyint_f32(float %x) {
}
; CHECK-LABEL: name: test_llvm.aarch64.neon.ld3.v4i32.p0i32
; CHECK: %1:_(<4 x s32>), %2:_(<4 x s32>), %3:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld3), %0(p0) :: (load 48 from %ir.ptr, align 64)
; CHECK: %1:_(<4 x s32>), %2:_(<4 x s32>), %3:_(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld3), %0(p0) :: (load (s384) from %ir.ptr, align 64)
define void @test_llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %ptr) {
%arst = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %ptr)
ret void


@ -233,7 +233,7 @@ body: |
successors: %bb.2.end, %bb.1.then
liveins: $x0, $x1, $w2
%0 = LDRWui killed $x0, 0 :: (load 4 from %ir.src)
%0 = LDRWui killed $x0, 0 :: (load (s32) from %ir.src)
%5(s32) = COPY %0
%1(p0) = COPY $x1
%2 = COPY $w2
@ -245,7 +245,7 @@ body: |
bb.2.end:
%4(s32) = PHI %0, %bb.0.entry, %3, %bb.1.then
G_STORE killed %4, killed %1 :: (store 4 into %ir.dst)
G_STORE killed %4, killed %1 :: (store (s32) into %ir.dst)
RET_ReallyLR
...
@ -692,10 +692,10 @@ registers:
# CHECK: %0:gpr(s64) = COPY $x0
# CHECK-NEXT: %1:gpr(p0) = COPY $x1
# FAST-NEXT: %2:fpr(<2 x s32>) = G_BITCAST %0(s64)
# FAST-NEXT: %3:fpr(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
# FAST-NEXT: %3:fpr(<2 x s32>) = G_LOAD %1(p0) :: (load (<2 x s32>) from %ir.addr)
# FAST-NEXT: %4:fpr(<2 x s32>) = G_OR %2, %3
# GREEDY-NEXT: %2:gpr(<2 x s32>) = G_BITCAST %0(s64)
# GREEDY-NEXT: %3:gpr(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
# GREEDY-NEXT: %3:gpr(<2 x s32>) = G_LOAD %1(p0) :: (load (<2 x s32>) from %ir.addr)
# GREEDY-NEXT: %4:gpr(<2 x s32>) = G_OR %2, %3
# CHECK-NEXT: %5:gpr(s64) = G_BITCAST %4(<2 x s32>)
# CHECK-NEXT: $x0 = COPY %5(s64)
@ -707,7 +707,7 @@ body: |
%0(s64) = COPY $x0
%1(p0) = COPY $x1
%2(<2 x s32>) = G_BITCAST %0(s64)
%3(<2 x s32>) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
%3(<2 x s32>) = G_LOAD %1(p0) :: (load (<2 x s32>) from %ir.addr)
%4(<2 x s32>) = G_OR %2, %3
%5(s64) = G_BITCAST %4(<2 x s32>)
$x0 = COPY %5(s64)
@ -737,7 +737,7 @@ registers:
# No repairing should be necessary for both modes.
# CHECK: %0:gpr(s64) = COPY $x0
# CHECK-NEXT: %1:gpr(p0) = COPY $x1
# CHECK-NEXT: %2:fpr(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
# CHECK-NEXT: %2:fpr(s64) = G_LOAD %1(p0) :: (load (s64) from %ir.addr)
# %0 has been mapped to GPR, we need to repair to match FPR.
# CHECK-NEXT: %4:fpr(s64) = COPY %0
# CHECK-NEXT: %3:fpr(s64) = G_FADD %4, %2
@ -750,7 +750,7 @@ body: |
%0(s64) = COPY $x0
%1(p0) = COPY $x1
%2(s64) = G_LOAD %1(p0) :: (load 8 from %ir.addr)
%2(s64) = G_LOAD %1(p0) :: (load (s64) from %ir.addr)
%3(s64) = G_FADD %0, %2
$x0 = COPY %3(s64)
RET_ReallyLR implicit $x0
@ -781,7 +781,7 @@ registers:
# CHECK-NEXT: %3:fpr(s64) = COPY %0
# CHECK-NEXT: %4:fpr(s64) = COPY %0
# CHECK-NEXT: %2:fpr(s64) = G_FADD %3, %4
# CHECK-NEXT: G_STORE %2(s64), %1(p0) :: (store 8 into %ir.addr)
# CHECK-NEXT: G_STORE %2(s64), %1(p0) :: (store (s64) into %ir.addr)
# CHECK-NEXT: RET_ReallyLR
body: |
@ -791,7 +791,7 @@ body: |
%0(s64) = COPY $x0
%1(p0) = COPY $x1
%2(s64) = G_FADD %0, %0
G_STORE %2(s64), %1(p0) :: (store 8 into %ir.addr)
G_STORE %2(s64), %1(p0) :: (store (s64) into %ir.addr)
RET_ReallyLR
...
@ -931,11 +931,11 @@ body: |
# If we didn't look through the copy for %0, the default mapping
# would have been on GPR and we would have to insert a copy to move
# the value away from FPR (h0).
# CHECK-NEXT: G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p.addr)
# CHECK-NEXT: G_STORE %0(s16), %1(p0) :: (store (s16) into %ir.p.addr)
# If we didn't look through the copy for %2, the default mapping
# would have been on GPR and we would have to insert a copy to move
# the value to FPR (h0).
# CHECK-NEXT: %2:fpr(s16) = G_LOAD %1(p0) :: (load 2 from %ir.p.addr)
# CHECK-NEXT: %2:fpr(s16) = G_LOAD %1(p0) :: (load (s16) from %ir.p.addr)
# CHECK-NEXT: $h0 = COPY %2(s16)
name: passFp16ViaAllocas
alignment: 4
@ -955,8 +955,8 @@ body: |
%0(s16) = COPY $h0
%1(p0) = G_FRAME_INDEX %stack.0.p.addr
G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p.addr)
%2(s16) = G_LOAD %1(p0) :: (load 2 from %ir.p.addr)
G_STORE %0(s16), %1(p0) :: (store (s16) into %ir.p.addr)
%2(s16) = G_LOAD %1(p0) :: (load (s16) from %ir.p.addr)
$h0 = COPY %2(s16)
RET_ReallyLR implicit $h0

View File
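The `<2 x s32>` hunks above show the main payoff for vectors: instead of a flat byte count, the operand now keeps the element count and width, so an 8-byte vector access is distinguishable from an s64 scalar one in the printed form. A sketch with illustrative register names:

; before: only the total size (8 bytes) was visible in the text form
%v:fpr(<2 x s32>) = G_LOAD %addr(p0) :: (load 8 from %ir.addr)
; after: the memory type records the vector shape
%v:fpr(<2 x s32>) = G_LOAD %addr(p0) :: (load (<2 x s32>) from %ir.addr)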

@ -53,9 +53,9 @@ define i32 @signext_param_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.0, align 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
; CHECK: [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[LOAD1]], 1
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_SEXT]](s32)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s1)
@ -80,9 +80,9 @@ define i32 @dont_need_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s32) from %fixed-stack.0, align 8)
; CHECK: $w0 = COPY [[LOAD1]](s32)
; CHECK: RET_ReallyLR implicit $w0
i64 %f, i64 %g, i64 %h, i64 %i,
@ -104,9 +104,9 @@ define i8 @s8_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.0, align 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
; CHECK: [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[LOAD1]], 8
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)

View File
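Note in the extension tests above that the memory LLT is independent of the destination register type: a sign- or zero-extended i1/i8 stack argument still loads into an s32 register, but the operand now records the s8 actually read, where the old `load 1` gave only the byte count. A sketch (the frame index and names are illustrative):

; the register is s32, the access is s8; the operand describes the access
%arg:_(s32) = G_LOAD %fi(p0) :: (invariant load (s8) from %fixed-stack.0, align 8)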

@ -41,7 +41,6 @@ define i32 @zeroext_param_i32(i32 zeroext %x) {
; Zeroext param is passed on the stack. We should still get a G_ASSERT_ZEXT.
define i32 @zeroext_param_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
i64 %g, i64 %h, i64 %i, i1 zeroext %j) {
; CHECK-LABEL: name: zeroext_param_stack
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
@ -54,22 +53,21 @@ define i32 @zeroext_param_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f,
; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.0, align 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
; CHECK: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[LOAD1]], 1
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_ZEXT]](s32)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s1)
; CHECK: $w0 = COPY [[ZEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
i64 %g, i64 %h, i64 %i, i1 zeroext %j) {
%v = zext i1 %j to i32
ret i32 %v
}
; The zeroext parameter is an s32, so there's no extension required.
define i32 @dont_need_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
i64 %f, i64 %g, i64 %h, i64 %i,
i32 zeroext %j) {
; CHECK-LABEL: name: dont_need_assert_zext_stack
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
@ -82,18 +80,18 @@ define i32 @dont_need_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s32) from %fixed-stack.0, align 8)
; CHECK: $w0 = COPY [[LOAD1]](s32)
; CHECK: RET_ReallyLR implicit $w0
i64 %f, i64 %g, i64 %h, i64 %i,
i32 zeroext %j) {
ret i32 %j
}
; s8 requires extension to s32, so we should get a G_ASSERT_ZEXT here.
define i8 @s8_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
i64 %f, i64 %g, i64 %h, i64 %i,
i8 zeroext %j) {
; CHECK-LABEL: name: s8_assert_zext_stack
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7
@ -106,13 +104,15 @@ define i8 @s8_assert_zext_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e,
; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.0, align 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0, align 8)
; CHECK: [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[LOAD1]], 8
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
i64 %f, i64 %g, i64 %h, i64 %i,
i8 zeroext %j) {
ret i8 %j
}

View File

@ -2,17 +2,17 @@
; CHECK-LABEL: name: test_split_struct
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.ptr)
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load 8 from %ir.ptr + 8)
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load (s64) from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST2]](s64)
; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store 8 into stack, align 1)
; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store (s64) into stack, align 1)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]](s64)
; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store 8 into stack + 8, align 1)
; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store (s64) into stack + 8, align 1)
define void @test_split_struct([2 x i64]* %ptr) {
%struct = load [2 x i64], [2 x i64]* %ptr
call void @take_split_struct([2 x i64]* null, i64 1, i64 2, i64 3,

View File
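As the split-struct hunks show, only the size notation changes: the `+ 8` offsets and the `align`/`basealign` annotations keep their existing form around the new type. For example (a sketch, not a line from a specific test):

; offset and alignment print exactly as before; only "8" becomes "(s64)"
G_STORE %hi(s64), %gep(p0) :: (store (s64) into stack + 8, align 1)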

@ -6,11 +6,11 @@
; CHECK-DAG: - { id: [[STACK0:[0-9]+]], type: default, offset: 0, size: 1,
; CHECK-DAG: - { id: [[STACK8:[0-9]+]], type: default, offset: 1, size: 1,
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.1, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s8) from %fixed-stack.1, align 16)
; CHECK: [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[LOAD]], 8
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.0)
; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s8) from %fixed-stack.0)
; CHECK: [[ASSERT_SEXT1:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[LOAD1]], 8
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT1]](s32)
; CHECK: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]]
@ -28,10 +28,10 @@ define signext i8 @test_stack_slots([8 x i64], i8 signext %lhs, i8 signext %rhs)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C42_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[C42_OFFS]](s64)
; CHECK: G_STORE [[C42]](s8), [[C42_LOC]](p0) :: (store 1 into stack)
; CHECK: G_STORE [[C42]](s8), [[C42_LOC]](p0) :: (store (s8) into stack)
; CHECK: [[C12_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[C12_OFFS]](s64)
; CHECK: G_STORE [[C12]](s8), [[C12_LOC]](p0) :: (store 1 into stack + 1)
; CHECK: G_STORE [[C12]](s8), [[C12_LOC]](p0) :: (store (s8) into stack + 1)
; CHECK: BL @test_stack_slots
define void @test_call_stack() {
call signext i8 @test_stack_slots([8 x i64] undef, i8 signext 42, i8 signext 12)
@ -59,18 +59,18 @@ define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
}
; CHECK-LABEL: name: test_split_struct
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.ptr)
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.ptr + 8)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD %3(p0) :: (load (s64) from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[OFF:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[OFF]](s64)
; CHECK: G_STORE [[LD1]](s64), [[ADDR]](p0) :: (store 8 into stack, align 1)
; CHECK: G_STORE [[LD1]](s64), [[ADDR]](p0) :: (store (s64) into stack, align 1)
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]]
; CHECK: G_STORE [[LD2]](s64), [[ADDR]](p0) :: (store 8 into stack + 8, align 1)
; CHECK: G_STORE [[LD2]](s64), [[ADDR]](p0) :: (store (s64) into stack + 8, align 1)
define void @test_split_struct([2 x i64]* %ptr) {
%struct = load [2 x i64], [2 x i64]* %ptr
call void @take_split_struct([2 x i64]* null, i64 1, i64 2, i64 3,
@ -85,10 +85,10 @@ define void @test_split_struct([2 x i64]* %ptr) {
; CHECK-DAG: - { id: [[HI_FRAME:[0-9]+]], type: default, offset: 8, size: 8
; CHECK: [[LOPTR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[LO_FRAME]]
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD [[LOPTR]](p0) :: (invariant load 8 from %fixed-stack.[[LO_FRAME]], align 16)
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD [[LOPTR]](p0) :: (invariant load (s64) from %fixed-stack.[[LO_FRAME]], align 16)
; CHECK: [[HIPTR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[HI_FRAME]]
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[HIPTR]](p0) :: (invariant load 8 from %fixed-stack.[[HI_FRAME]])
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[HIPTR]](p0) :: (invariant load (s64) from %fixed-stack.[[HI_FRAME]])
define void @take_split_struct([2 x i64]* %ptr, i64, i64, i64,
i64, i64, i64,
[2 x i64] %in) {

View File

@ -51,8 +51,8 @@ define void @dont_tail_call_explicit_sret_alloca_dummyusers(i64* %ptr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.l
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.ptr)
; CHECK: G_STORE [[LOAD]](s64), [[FRAME_INDEX]](p0) :: (store 8 into %ir.l)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.ptr)
; CHECK: G_STORE [[LOAD]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %ir.l)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: $x8 = COPY [[FRAME_INDEX]](p0)
; CHECK: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
@ -90,7 +90,7 @@ define i64 @dont_tail_call_sret_alloca_returned() {
; CHECK: $x8 = COPY [[FRAME_INDEX]](p0)
; CHECK: BL @test_explicit_sret, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x8
; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8 from %ir.l)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64) from %ir.l)
; CHECK: $x0 = COPY [[LOAD]](s64)
; CHECK: RET_ReallyLR implicit $x0
%l = alloca i64, align 8

View File

@ -66,7 +66,7 @@ define void @test_outgoing_stack_args([8 x <2 x double>], <4 x half> %arg) {
; DARWIN: [[COPY6:%[0-9]+]]:_(<2 x s64>) = COPY $q6
; DARWIN: [[COPY7:%[0-9]+]]:_(<2 x s64>) = COPY $q7
; DARWIN: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; DARWIN: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.0, align 16)
; DARWIN: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.0, align 16)
; DARWIN: $d0 = COPY [[LOAD]](<4 x s16>)
; DARWIN: TCRETURNdi @outgoing_stack_args_fn, 0, csr_darwin_aarch64_aapcs, implicit $sp, implicit $d0
; WINDOWS-LABEL: name: test_outgoing_stack_args
@ -81,7 +81,7 @@ define void @test_outgoing_stack_args([8 x <2 x double>], <4 x half> %arg) {
; WINDOWS: [[COPY6:%[0-9]+]]:_(<2 x s64>) = COPY $q6
; WINDOWS: [[COPY7:%[0-9]+]]:_(<2 x s64>) = COPY $q7
; WINDOWS: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; WINDOWS: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.0, align 16)
; WINDOWS: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.0, align 16)
; WINDOWS: $d0 = COPY [[LOAD]](<4 x s16>)
; WINDOWS: TCRETURNdi @outgoing_stack_args_fn, 0, csr_aarch64_aapcs, implicit $sp, implicit $d0
tail call void @outgoing_stack_args_fn(<4 x half> %arg)
@ -109,10 +109,10 @@ define i32 @test_too_big_stack() {
; DARWIN: [[COPY:%[0-9]+]]:_(p0) = COPY $sp
; DARWIN: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; DARWIN: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; DARWIN: G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
; DARWIN: G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store (s8) into stack)
; DARWIN: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; DARWIN: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; DARWIN: G_STORE [[C1]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 2, align 1)
; DARWIN: G_STORE [[C1]](s16), [[PTR_ADD1]](p0) :: (store (s16) into stack + 2, align 1)
; DARWIN: BL @too_big_stack, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit-def $w0
; DARWIN: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0
; DARWIN: ADJCALLSTACKUP 4, 0, implicit-def $sp, implicit $sp
@ -135,10 +135,10 @@ define i32 @test_too_big_stack() {
; WINDOWS: [[COPY:%[0-9]+]]:_(p0) = COPY $sp
; WINDOWS: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; WINDOWS: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; WINDOWS: G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store 1 into stack)
; WINDOWS: G_STORE [[C]](s8), [[PTR_ADD]](p0) :: (store (s8) into stack)
; WINDOWS: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; WINDOWS: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; WINDOWS: G_STORE [[C1]](s16), [[PTR_ADD1]](p0) :: (store 2 into stack + 8, align 1)
; WINDOWS: G_STORE [[C1]](s16), [[PTR_ADD1]](p0) :: (store (s16) into stack + 8, align 1)
; WINDOWS: BL @too_big_stack, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $x3, implicit $x4, implicit $x5, implicit $x6, implicit $x7, implicit-def $w0
; WINDOWS: [[COPY1:%[0-9]+]]:_(s32) = COPY $w0
; WINDOWS: ADJCALLSTACKUP 16, 0, implicit-def $sp, implicit $sp
@ -206,7 +206,7 @@ define void @test_varargs_2() {
; DARWIN: [[COPY:%[0-9]+]]:_(p0) = COPY $sp
; DARWIN: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; DARWIN: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
; DARWIN: G_STORE [[C3]](s64), [[PTR_ADD]](p0) :: (store 8 into stack, align 1)
; DARWIN: G_STORE [[C3]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 1)
; DARWIN: BL @varargs, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $d0, implicit $x1
; DARWIN: ADJCALLSTACKUP 8, 0, implicit-def $sp, implicit $sp
; DARWIN: RET_ReallyLR
@ -242,7 +242,7 @@ define void @test_varargs_3([8 x <2 x double>], <4 x half> %arg) {
; DARWIN: [[COPY6:%[0-9]+]]:_(<2 x s64>) = COPY $q6
; DARWIN: [[COPY7:%[0-9]+]]:_(<2 x s64>) = COPY $q7
; DARWIN: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; DARWIN: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.0, align 16)
; DARWIN: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.0, align 16)
; DARWIN: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
; DARWIN: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; DARWIN: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
@ -254,7 +254,7 @@ define void @test_varargs_3([8 x <2 x double>], <4 x half> %arg) {
; DARWIN: [[COPY8:%[0-9]+]]:_(p0) = COPY $sp
; DARWIN: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; DARWIN: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY8]], [[C4]](s64)
; DARWIN: G_STORE [[C3]](s64), [[PTR_ADD]](p0) :: (store 8 into stack, align 1)
; DARWIN: G_STORE [[C3]](s64), [[PTR_ADD]](p0) :: (store (s64) into stack, align 1)
; DARWIN: BL @varargs, csr_darwin_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $w0, implicit $d0, implicit $x1
; DARWIN: ADJCALLSTACKUP 8, 0, implicit-def $sp, implicit $sp
; DARWIN: RET_ReallyLR
@ -270,7 +270,7 @@ define void @test_varargs_3([8 x <2 x double>], <4 x half> %arg) {
; WINDOWS: [[COPY6:%[0-9]+]]:_(<2 x s64>) = COPY $q6
; WINDOWS: [[COPY7:%[0-9]+]]:_(<2 x s64>) = COPY $q7
; WINDOWS: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; WINDOWS: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.0, align 16)
; WINDOWS: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.0, align 16)
; WINDOWS: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
; WINDOWS: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; WINDOWS: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12

View File

@ -65,13 +65,13 @@ define void @test_multiple_args(i64 %in) {
; CHECK: [[I8:%[0-9]+]]:_(s8) = G_TRUNC [[I8_C]]
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x2
; CHECK: G_STORE [[DBL]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr)
; CHECK: G_STORE [[DBL]](s64), [[ADDR]](p0) :: (store (s64) into %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: G_STORE [[I64]](s64), [[GEP1]](p0) :: (store 8 into %ir.addr + 8)
; CHECK: G_STORE [[I64]](s64), [[GEP1]](p0) :: (store (s64) into %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST2]](s64)
; CHECK: G_STORE [[I8]](s8), [[GEP2]](p0) :: (store 1 into %ir.addr + 16, align 8)
; CHECK: G_STORE [[I8]](s8), [[GEP2]](p0) :: (store (s8) into %ir.addr + 16, align 8)
; CHECK: RET_ReallyLR
define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr) {
store {double, i64, i8} %in, {double, i64, i8}* %addr
@ -82,13 +82,13 @@ define void @test_struct_formal({double, i64, i8} %in, {double, i64, i8}* %addr)
; CHECK-LABEL: name: test_struct_return
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr)
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load (s64) from %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.addr + 8)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load (s64) from %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.addr + 16, align 8)
; CHECK: [[LD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load (s32) from %ir.addr + 16, align 8)
; CHECK: $d0 = COPY [[LD1]](s64)
; CHECK: $x0 = COPY [[LD2]](s64)
@ -101,16 +101,16 @@ define {double, i64, i32} @test_struct_return({double, i64, i32}* %addr) {
; CHECK-LABEL: name: test_arr_call
; CHECK: %0:_(p0) = COPY $x0
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
; CHECK: [[LD1:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.addr)
; CHECK: [[CST1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST1]](s64)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.addr + 8)
; CHECK: [[LD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load (s64) from %ir.addr + 8)
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST2]](s64)
; CHECK: [[LD3:%[0-9]+]]:_(s64) = G_LOAD [[GEP2]](p0) :: (load 8 from %ir.addr + 16)
; CHECK: [[LD3:%[0-9]+]]:_(s64) = G_LOAD [[GEP2]](p0) :: (load (s64) from %ir.addr + 16)
; CHECK: [[CST3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD %0, [[CST3]](s64)
; CHECK: [[LD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP3]](p0) :: (load 8 from %ir.addr + 24)
; CHECK: [[LD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP3]](p0) :: (load (s64) from %ir.addr + 24)
; CHECK: $x0 = COPY [[LD1]](s64)
; CHECK: $x1 = COPY [[LD2]](s64)
@ -155,7 +155,7 @@ define void @test_abi_exts_call(i8* %addr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.addr)
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s8) from %ir.addr)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
; CHECK: $w0 = COPY [[ZEXT]](s32)
@ -173,7 +173,7 @@ define void @test_zext_in_callee(i8* %addr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.addr)
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s8) from %ir.addr)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
; CHECK: $w0 = COPY [[SEXT]](s32)
@ -213,11 +213,11 @@ define zeroext i8 @test_abi_zext_ret(i8* %addr) {
; CHECK-DAG: - { id: [[STACK8:[0-9]+]], type: default, offset: 8, size: 8,
; CHECK-DAG: - { id: [[STACK16:[0-9]+]], type: default, offset: 16, size: 8,
; CHECK: [[LHS_ADDR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[STACK0]]
; CHECK: [[LHS:%[0-9]+]]:_(s64) = G_LOAD [[LHS_ADDR]](p0) :: (invariant load 8 from %fixed-stack.[[STACK0]], align 16)
; CHECK: [[LHS:%[0-9]+]]:_(s64) = G_LOAD [[LHS_ADDR]](p0) :: (invariant load (s64) from %fixed-stack.[[STACK0]], align 16)
; CHECK: [[RHS_ADDR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[STACK8]]
; CHECK: [[RHS:%[0-9]+]]:_(s64) = G_LOAD [[RHS_ADDR]](p0) :: (invariant load 8 from %fixed-stack.[[STACK8]])
; CHECK: [[RHS:%[0-9]+]]:_(s64) = G_LOAD [[RHS_ADDR]](p0) :: (invariant load (s64) from %fixed-stack.[[STACK8]])
; CHECK: [[ADDR_ADDR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[STACK16]]
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_LOAD [[ADDR_ADDR]](p0) :: (invariant load 8 from %fixed-stack.[[STACK16]], align 16)
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_LOAD [[ADDR_ADDR]](p0) :: (invariant load (s64) from %fixed-stack.[[STACK16]], align 16)
; CHECK: [[SUM:%[0-9]+]]:_(s64) = G_ADD [[LHS]], [[RHS]]
; CHECK: G_STORE [[SUM]](s64), [[ADDR]](p0)
define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, i64* %addr) {
@ -234,13 +234,13 @@ define void @test_stack_slots([8 x i64], i64 %lhs, i64 %rhs, i64* %addr) {
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[C42_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[C42_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[C42_OFFS]](s64)
; CHECK: G_STORE [[C42]](s64), [[C42_LOC]](p0) :: (store 8 into stack, align 1)
; CHECK: G_STORE [[C42]](s64), [[C42_LOC]](p0) :: (store (s64) into stack, align 1)
; CHECK: [[C12_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[C12_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[C12_OFFS]](s64)
; CHECK: G_STORE [[C12]](s64), [[C12_LOC]](p0) :: (store 8 into stack + 8, align 1)
; CHECK: G_STORE [[C12]](s64), [[C12_LOC]](p0) :: (store (s64) into stack + 8, align 1)
; CHECK: [[PTR_OFFS:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_LOC:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[PTR_OFFS]](s64)
; CHECK: G_STORE [[PTR]](p0), [[PTR_LOC]](p0) :: (store 8 into stack + 16, align 1)
; CHECK: G_STORE [[PTR]](p0), [[PTR_LOC]](p0) :: (store (s64) into stack + 16, align 1)
; CHECK: BL @test_stack_slots
; CHECK: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
define void @test_call_stack() {
@ -253,7 +253,7 @@ define void @test_call_stack() {
; CHECK-NEXT: - { id: [[SLOT:[0-9]+]], type: default, offset: 0, size: 1, alignment: 16, stack-id: default,
; CHECK-NEXT: isImmutable: true,
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[SLOT]]
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[ADDR]](p0) :: (invariant load 1 from %fixed-stack.[[SLOT]], align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[ADDR]](p0) :: (invariant load (s8) from %fixed-stack.[[SLOT]], align 16)
; CHECK-NEXT: {{%[0-9]+}}:_(s1) = G_TRUNC [[LOAD]]
define void @test_mem_i1([8 x i64], i1 %in) {
ret void
@ -281,17 +281,17 @@ define void @take_128bit_struct([2 x i64]* %ptr, [2 x i64] %in) {
; CHECK-LABEL: name: test_split_struct
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.ptr)
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD %0(p0) :: (load (s64) from %ir.ptr)
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[ADDR]], [[CST]](s64)
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load 8 from %ir.ptr + 8)
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p0) :: (load (s64) from %ir.ptr + 8)
; CHECK: [[SP:%[0-9]+]]:_(p0) = COPY $sp
; CHECK: [[CST2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST2]](s64)
; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store 8 into stack, align 1)
; CHECK: G_STORE [[LO]](s64), [[GEP2]](p0) :: (store (s64) into stack, align 1)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[SP]], [[CST]](s64)
; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store 8 into stack + 8, align 1)
; CHECK: G_STORE [[HI]](s64), [[GEP3]](p0) :: (store (s64) into stack + 8, align 1)
define void @test_split_struct([2 x i64]* %ptr) {
%struct = load [2 x i64], [2 x i64]* %ptr
call void @take_split_struct([2 x i64]* null, i64 1, i64 2, i64 3,
@ -306,10 +306,10 @@ define void @test_split_struct([2 x i64]* %ptr) {
; CHECK-DAG: - { id: [[HI_FRAME:[0-9]+]], type: default, offset: 8, size: 8
; CHECK: [[LOPTR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[LO_FRAME]]
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD [[LOPTR]](p0) :: (invariant load 8 from %fixed-stack.[[LO_FRAME]], align 16)
; CHECK: [[LO:%[0-9]+]]:_(s64) = G_LOAD [[LOPTR]](p0) :: (invariant load (s64) from %fixed-stack.[[LO_FRAME]], align 16)
; CHECK: [[HIPTR:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.[[HI_FRAME]]
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[HIPTR]](p0) :: (invariant load 8 from %fixed-stack.[[HI_FRAME]])
; CHECK: [[HI:%[0-9]+]]:_(s64) = G_LOAD [[HIPTR]](p0) :: (invariant load (s64) from %fixed-stack.[[HI_FRAME]])
define void @take_split_struct([2 x i64]* %ptr, i64, i64, i64,
i64, i64, i64,
[2 x i64] %in) {

View File

@ -64,15 +64,15 @@ body: |
%6:_(s64) = G_CONSTANT i64 0
%7:_(p0) = G_PTR_ADD %5, %6(s64), debug-location !8
%8:_(s64) = G_ANYEXT %2(s32), debug-location !8
G_STORE %8(s64), %7(p0), debug-location !8 :: (store 8 into stack, align 1)
G_STORE %8(s64), %7(p0), debug-location !8 :: (store (s64) into stack, align 1)
BL @printf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, debug-location !8
ADJCALLSTACKUP 8, 0, implicit-def $sp, implicit $sp, debug-location !8
%13:_(s64) = G_LOAD %10(p0), debug-location !9 :: (load 4 from `i32* undef`)
%13:_(s64) = G_LOAD %10(p0), debug-location !9 :: (load (s32) from `i32* undef`)
ADJCALLSTACKDOWN 8, 0, implicit-def $sp, implicit $sp
$x0 = COPY %4(p0)
%11:_(p0) = COPY $sp
%12:_(p0) = G_PTR_ADD %11, %6(s64)
G_STORE %13(s64), %12(p0) :: (store 8 into stack, align 1)
G_STORE %13(s64), %12(p0) :: (store (s64) into stack, align 1)
BL @printf, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0
ADJCALLSTACKUP 8, 0, implicit-def $sp, implicit $sp
RET_ReallyLR

View File

@ -20,13 +20,13 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1028443341
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 524
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store 4)
; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store (s32))
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%3:_(s32) = G_FCONSTANT float 0x3FA99999A0000000
%1:_(s64) = G_CONSTANT i64 524
%2:_(p0) = G_PTR_ADD %0, %1(s64)
G_STORE %3(s32), %2(p0) :: (store 4)
G_STORE %3(s32), %2(p0) :: (store (s32))
RET_ReallyLR
...
---
@ -43,11 +43,11 @@ body: |
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: %c:_(s64) = G_CONSTANT i64 0
; CHECK: G_STORE %c(s64), %ptr(p0) :: (store 8)
; CHECK: G_STORE %c(s64), %ptr(p0) :: (store (s64))
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%c:_(s64) = G_FCONSTANT double 0.0
G_STORE %c(s64), %ptr(p0) :: (store 8)
G_STORE %c(s64), %ptr(p0) :: (store (s64))
RET_ReallyLR
...
---

View File
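The constant-store hunks above also cover operands with no IR value attached: when there is nothing to print after the size, the bare byte count simply becomes a bare LLT. A sketch with illustrative names:

; no "into ..." clause; the operand carries only the type
G_STORE %c(s32), %p(p0) :: (store (s32))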

@ -11,11 +11,11 @@ body: |
; CHECK-LABEL: name: test_combine_sext_trunc_of_sextload
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load 2)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SEXTLOAD]](s64)
; CHECK: $w0 = COPY [[TRUNC]](s32)
%0:_(p0) = COPY $x0
%1:_(s64) = G_SEXTLOAD %0:_(p0) :: (load 2)
%1:_(s64) = G_SEXTLOAD %0:_(p0) :: (load (s16))
%2:_(s32) = G_TRUNC %1:_(s64)
%3:_(s32) = G_SEXT_INREG %2:_(s32), 16
$w0 = COPY %3(s32)
@ -30,10 +30,10 @@ body: |
; CHECK-LABEL: name: test_combine_sext_of_sextload
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 2)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_SEXTLOAD %0:_(p0) :: (load 2)
%1:_(s32) = G_SEXTLOAD %0:_(p0) :: (load (s16))
%2:_(s32) = COPY %1:_(s32)
%3:_(s32) = G_SEXT_INREG %2:_(s32), 16
$w0 = COPY %3(s32)
@ -49,10 +49,10 @@ body: |
; CHECK-LABEL: name: test_combine_sext_of_sextload_not_matching
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 2)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s16))
; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_SEXTLOAD %0:_(p0) :: (load 2)
%1:_(s32) = G_SEXTLOAD %0:_(p0) :: (load (s16))
%2:_(s32) = COPY %1:_(s32)
%3:_(s32) = G_SEXT_INREG %2:_(s32), 24
$w0 = COPY %3(s32)

View File
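For the extending-load combines above, the shape is the same as for G_LOAD: the result register may be s32 or s64 while the operand records the s16 actually read, so `(load 2)` becomes `(load (s16))`. A minimal sketch:

; a sign-extending load: the memory LLT is narrower than the result type
%wide:_(s64) = G_SEXTLOAD %ptr(p0) :: (load (s16))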

@ -20,7 +20,7 @@ body: |
; CHECK: bb.1:
; CHECK: successors:
; CHECK: bb.2:
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load 4 from `i32* undef`, align 8)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF1]](p0) :: (load (s32) from `i32* undef`, align 8)
; CHECK: [[MUL:%[0-9]+]]:_(s32) = nsw G_MUL [[C]], [[LOAD]]
; CHECK: [[MUL1:%[0-9]+]]:_(s32) = nsw G_MUL [[MUL]], [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
@ -45,7 +45,7 @@ body: |
bb.3:
%2:_(s32) = G_LOAD %3(p0) :: (load 4 from `i32* undef`, align 8)
%2:_(s32) = G_LOAD %3(p0) :: (load (s32) from `i32* undef`, align 8)
%5:_(s32) = nsw G_MUL %4, %2
%7:_(s32) = nsw G_MUL %5, %6
%9:_(s32) = nsw G_MUL %7, %8

View File

@ -17,14 +17,14 @@ define i32 @main() #0 !dbg !14 {
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK: [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var2, debug-location !DILocation(line: 0, scope: !22)
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store 4 into %ir.retval)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0), debug-location !17 :: (dereferenceable load 4 from @var1)
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %ir.retval)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[GV]](p0), debug-location !17 :: (dereferenceable load (s32) from @var1)
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[LOAD]](s32), [[C1]], debug-location !19
; CHECK: G_BRCOND [[ICMP]](s1), %bb.2, debug-location !20
; CHECK: G_BR %bb.3, debug-location !20
; CHECK: bb.2.if.then:
; CHECK: successors: %bb.3(0x80000000)
; CHECK: G_STORE [[C2]](s32), [[GV1]](p0), debug-location !21 :: (store 4 into @var2)
; CHECK: G_STORE [[C2]](s32), [[GV1]](p0), debug-location !21 :: (store (s32) into @var2)
; CHECK: bb.3.if.end:
; CHECK: $w0 = COPY [[C]](s32), debug-location !24
; CHECK: RET_ReallyLR implicit $w0, debug-location !24

View File

@ -21,13 +21,13 @@ body: |
; CHECK: DBG_VALUE [[C1]](s64), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 3, column: 1, scope: !6)
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64), debug-location !DILocation(line: 4, column: 1, scope: !6)
; CHECK: DBG_VALUE [[PTR_ADD]](p0), $noreg, !9, !DIExpression(), debug-location !DILocation(line: 4, column: 1, scope: !6)
; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0), debug-location !DILocation(line: 5, column: 1, scope: !6) :: (store 4)
; CHECK: G_STORE [[C]](s32), [[PTR_ADD]](p0), debug-location !DILocation(line: 5, column: 1, scope: !6) :: (store (s32))
; CHECK: DBG_VALUE 0, $noreg, !9, !DIExpression(), debug-location !DILocation(line: 5, column: 1, scope: !6)
; CHECK: RET_ReallyLR debug-location !DILocation(line: 6, column: 1, scope: !6)
%0:_(p0) = COPY $x0
%3:_(s32) = G_FCONSTANT float 0x3FA99999A0000000
%1:_(s64) = G_CONSTANT i64 524
%2:_(p0) = G_PTR_ADD %0, %1(s64)
G_STORE %3(s32), %2(p0) :: (store 4)
G_STORE %3(s32), %2(p0) :: (store (s32))
RET_ReallyLR
...

View File

@ -20,11 +20,11 @@ body: |
; CHECK-LABEL: name: contract_s64_gpr
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: STRXui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr)
; CHECK: STRXui [[COPY1]], [[COPY]], 0 :: (store (s64) into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:fpr(s64) = COPY %1
G_STORE %2:fpr(s64), %0 :: (store 8 into %ir.addr)
G_STORE %2:fpr(s64), %0 :: (store (s64) into %ir.addr)
...
---
name: contract_s32_gpr
@ -36,11 +36,11 @@ body: |
; CHECK-LABEL: name: contract_s32_gpr
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
; CHECK: STRWui [[COPY1]], [[COPY]], 0 :: (store 4 into %ir.addr)
; CHECK: STRWui [[COPY1]], [[COPY]], 0 :: (store (s32) into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:gpr(s32) = COPY $w1
%2:fpr(s32) = COPY %1
G_STORE %2:fpr(s32), %0 :: (store 4 into %ir.addr)
G_STORE %2:fpr(s32), %0 :: (store (s32) into %ir.addr)
...
---
name: contract_s64_fpr
@ -52,11 +52,11 @@ body: |
; CHECK-LABEL: name: contract_s64_fpr
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr)
; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store (s64) into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:fpr(s64) = COPY $d1
%2:gpr(s64) = COPY %1
G_STORE %2:gpr(s64), %0 :: (store 8 into %ir.addr)
G_STORE %2:gpr(s64), %0 :: (store (s64) into %ir.addr)
...
---
name: contract_s32_fpr
@ -68,11 +68,11 @@ body: |
; CHECK-LABEL: name: contract_s32_fpr
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:fpr32 = COPY $s1
; CHECK: STRSui [[COPY1]], [[COPY]], 0 :: (store 4 into %ir.addr)
; CHECK: STRSui [[COPY1]], [[COPY]], 0 :: (store (s32) into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:fpr(s32) = COPY $s1
%2:gpr(s32) = COPY %1
G_STORE %2:gpr(s32), %0 :: (store 4 into %ir.addr)
G_STORE %2:gpr(s32), %0 :: (store (s32) into %ir.addr)
...
---
name: contract_s16_fpr
@ -84,11 +84,11 @@ body: |
; CHECK-LABEL: name: contract_s16_fpr
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:fpr16 = COPY $h1
; CHECK: STRHui [[COPY1]], [[COPY]], 0 :: (store 2 into %ir.addr)
; CHECK: STRHui [[COPY1]], [[COPY]], 0 :: (store (s16) into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:fpr(s16) = COPY $h1
%2:gpr(s16) = COPY %1
G_STORE %2:gpr(s16), %0 :: (store 2 into %ir.addr)
G_STORE %2:gpr(s16), %0 :: (store (s16) into %ir.addr)
...
---
name: contract_g_unmerge_values_first
@ -101,13 +101,13 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY [[LOAD]].dsub
; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr)
; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store (s64) into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:fpr(<2 x s64>) = G_LOAD %0:gpr(p0) :: (dereferenceable load 16 from %ir.addr)
%1:fpr(<2 x s64>) = G_LOAD %0:gpr(p0) :: (dereferenceable load (<2 x s64>) from %ir.addr)
%2:fpr(s64), %3:fpr(s64) = G_UNMERGE_VALUES %1:fpr(<2 x s64>)
%4:gpr(s64) = COPY %2
%5:gpr(s64) = COPY %3
G_STORE %4:gpr(s64), %0 :: (store 8 into %ir.addr)
G_STORE %4:gpr(s64), %0 :: (store (s64) into %ir.addr)
...
---
name: contract_g_unmerge_values_second
@ -120,10 +120,10 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:fpr128 = LDRQui [[COPY]], 0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = CPYi64 [[LOAD]], 1
; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store 8 into %ir.addr)
; CHECK: STRDui [[COPY1]], [[COPY]], 0 :: (store (s64) into %ir.addr)
%0:gpr(p0) = COPY $x0
%1:fpr(<2 x s64>) = G_LOAD %0:gpr(p0) :: (dereferenceable load 16 from %ir.addr)
%1:fpr(<2 x s64>) = G_LOAD %0:gpr(p0) :: (dereferenceable load (<2 x s64>) from %ir.addr)
%2:fpr(s64), %3:fpr(s64) = G_UNMERGE_VALUES %1:fpr(<2 x s64>)
%4:gpr(s64) = COPY %2
%5:gpr(s64) = COPY %3
G_STORE %5:gpr(s64), %0 :: (store 8 into %ir.addr)
G_STORE %5:gpr(s64), %0 :: (store (s64) into %ir.addr)

View File

@ -36,7 +36,7 @@ body: |
%0:_(p0) = G_IMPLICIT_DEF debug-location !DILocation(line: 0, scope: !6)
%1:_(s8) = G_CONSTANT i8 0
%2:_(s64) = G_IMPLICIT_DEF debug-location !DILocation(line: 0, scope: !6)
G_MEMSET %0(p0), %1(s8), %2(s64), 1, debug-location !11 :: (store 1)
G_MEMSET %0(p0), %1(s8), %2(s64), 1, debug-location !11 :: (store (s8))
DBG_VALUE 0, 0, !9, !DIExpression(), debug-location !12
RET_ReallyLR debug-location !12

View File

@ -85,8 +85,8 @@ body: |
; CHECK: %val1:_(s64) = COPY $x0
; CHECK: %val2:_(s64) = COPY $x1
; CHECK: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @g + 1
; CHECK: G_STORE %val1(s64), [[GV]](p0) :: (store 8)
; CHECK: G_STORE %val2(s64), [[GV]](p0) :: (store 8)
; CHECK: G_STORE %val1(s64), [[GV]](p0) :: (store (s64))
; CHECK: G_STORE %val2(s64), [[GV]](p0) :: (store (s64))
; CHECK: RET_ReallyLR implicit $x0
%val1:_(s64) = COPY $x0
%val2:_(s64) = COPY $x1
@ -94,8 +94,8 @@ body: |
%offset:_(s64) = G_CONSTANT i64 1
%ptr_add1:_(p0) = G_PTR_ADD %global, %offset(s64)
%ptr_add2:_(p0) = G_PTR_ADD %global, %offset(s64)
G_STORE %val1:_(s64), %ptr_add1 :: (store 8)
G_STORE %val2:_(s64), %ptr_add2 :: (store 8)
G_STORE %val1:_(s64), %ptr_add1 :: (store (s64))
G_STORE %val2:_(s64), %ptr_add2 :: (store (s64))
RET_ReallyLR implicit $x0
...
@ -118,8 +118,8 @@ body: |
; CHECK: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @g + 2
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: %ptr_add2:_(p0) = G_PTR_ADD [[GV]], [[C]](s64)
; CHECK: G_STORE %val1(s64), [[GV]](p0) :: (store 8)
; CHECK: G_STORE %val2(s64), %ptr_add2(p0) :: (store 8)
; CHECK: G_STORE %val1(s64), [[GV]](p0) :: (store (s64))
; CHECK: G_STORE %val2(s64), %ptr_add2(p0) :: (store (s64))
; CHECK: RET_ReallyLR implicit $x0
%val1:_(s64) = COPY $x0
%val2:_(s64) = COPY $x1
@ -128,8 +128,8 @@ body: |
%offset2:_(s64) = G_CONSTANT i64 10
%ptr_add1:_(p0) = G_PTR_ADD %global, %offset1(s64)
%ptr_add2:_(p0) = G_PTR_ADD %global, %offset2(s64)
G_STORE %val1:_(s64), %ptr_add1 :: (store 8)
G_STORE %val2:_(s64), %ptr_add2 :: (store 8)
G_STORE %val1:_(s64), %ptr_add1 :: (store (s64))
G_STORE %val2:_(s64), %ptr_add2 :: (store (s64))
RET_ReallyLR implicit $x0
...

View File

@ -36,8 +36,8 @@ body: |
; CHECK: ret
%0:_(s128) = COPY $q0
%1:_(p0) = G_FRAME_INDEX %stack.0.a.addr
G_STORE %0(s128), %1(p0) :: (store 16 into %ir.a.addr)
%2:_(s128) = G_LOAD %1(p0) :: (load 16 from %ir.a.addr)
G_STORE %0(s128), %1(p0) :: (store (s128) into %ir.a.addr)
%2:_(s128) = G_LOAD %1(p0) :: (load (s128) from %ir.a.addr)
%3:_(s128) = G_FNEG %2
$q0 = COPY %3(s128)
RET_ReallyLR implicit $q0

View File

@ -55,7 +55,7 @@ body: |
; CHECK: [[COPY3:%[0-9]+]]:gpr32 = COPY [[SUBREG_TO_REG1]]
; CHECK: [[BFMWri1:%[0-9]+]]:gpr32 = BFMWri [[BFMWri]], [[COPY3]], 16, 15
; CHECK: [[COPY4:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: STRWui [[BFMWri1]], [[COPY4]], 0 :: (store 4 into %ir.addr, align 2)
; CHECK: STRWui [[BFMWri1]], [[COPY4]], 0 :: (store (s32) into %ir.addr, align 2)
; CHECK: RET_ReallyLR
%1:fpr(s16) = COPY $h0
%2:fpr(s16) = COPY $h1
@ -66,7 +66,7 @@ body: |
%5:gpr(s32) = G_INSERT %4, %12(s16), 16
%0:gpr(s32) = COPY %5(s32)
%6:gpr(p0) = COPY $x0
G_STORE %0(s32), %6(p0) :: (store 4 into %ir.addr, align 2)
G_STORE %0(s32), %6(p0) :: (store (s32) into %ir.addr, align 2)
RET_ReallyLR
...

View File

@ -34,53 +34,53 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load 16 from %ir.1 + 48, align 4)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store 16 into %ir.0 + 48, align 4)
; CHECK: G_STORE [[LOAD3]](s128), [[PTR_ADD5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load 16 from %ir.1 + 64, align 4)
; CHECK: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
; CHECK: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store 16 into %ir.0 + 64, align 4)
; CHECK: G_STORE [[LOAD4]](s128), [[PTR_ADD7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
; CHECK: [[PTR_ADD8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load 16 from %ir.1 + 80, align 4)
; CHECK: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
; CHECK: [[PTR_ADD9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store 16 into %ir.0 + 80, align 4)
; CHECK: G_STORE [[LOAD5]](s128), [[PTR_ADD9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
; CHECK: [[PTR_ADD10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load 16 from %ir.1 + 96, align 4)
; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
; CHECK: [[PTR_ADD11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store 16 into %ir.0 + 96, align 4)
; CHECK: G_STORE [[LOAD6]](s128), [[PTR_ADD11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
; CHECK: [[PTR_ADD12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
; CHECK: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load 16 from %ir.1 + 112, align 4)
; CHECK: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
; CHECK: [[PTR_ADD13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
; CHECK: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store 16 into %ir.0 + 112, align 4)
; CHECK: G_STORE [[LOAD7]](s128), [[PTR_ADD13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
; CHECK: [[PTR_ADD14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load 16 from %ir.1 + 127, align 1, basealign 4)
; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
; CHECK: [[PTR_ADD15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store 16 into %ir.0 + 127, align 1, basealign 4)
; CHECK: G_STORE [[LOAD8]](s128), [[PTR_ADD15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 143
G_MEMCPY_INLINE %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMCPY_INLINE %0(p0), %1(p0), %2(s64) :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...

View File
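In the memory-intrinsic tests above, G_MEMCPY_INLINE (like G_MEMCPY and G_MEMSET elsewhere in this commit) carries a store operand and a load operand describing a single unit of the copy, so the old byte-1 operands become `(store (s8) ...)` and `(load (s8) ...)`, while the unrolled copy itself uses the widened types. A sketch with illustrative names:

; the intrinsic's operands describe the per-byte access...
G_MEMCPY_INLINE %dst(p0), %src(p0), %len(s64) :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
; ...while the expanded copy loads and stores wide chunks
%chunk:_(s128) = G_LOAD %src(p0) :: (load (s128) from %ir.1, align 4)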

@ -70,12 +70,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = COPY $x2
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...
@ -96,33 +96,33 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load 16 from %ir.1 + 48, align 4)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store 16 into %ir.0 + 48, align 4)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load 8 from %ir.1 + 64, align 4)
; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store 8 into %ir.0 + 64, align 4)
; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 72
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...
@ -143,33 +143,33 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load 16 from %ir.1 + 48, align 4)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store 16 into %ir.0 + 48, align 4)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load 8 from %ir.1 + 64, align 4)
; CHECK: [[LOAD4:%[0-9]+]]:_(s64) = G_LOAD [[GEP6]](p0) :: (load (s64) from %ir.1 + 64, align 4)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store 8 into %ir.0 + 64, align 4)
; CHECK: G_STORE [[LOAD4]](s64), [[GEP7]](p0) :: (store (s64) into %ir.0 + 64, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 72
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...
@ -191,12 +191,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 72
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...
@ -217,53 +217,53 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load 16 from %ir.1 + 48, align 4)
; CHECK: [[LOAD3:%[0-9]+]]:_(s128) = G_LOAD [[GEP4]](p0) :: (load (s128) from %ir.1 + 48, align 4)
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store 16 into %ir.0 + 48, align 4)
; CHECK: G_STORE [[LOAD3]](s128), [[GEP5]](p0) :: (store (s128) into %ir.0 + 48, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK: [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C3]](s64)
; CHECK: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[GEP6]](p0) :: (load 16 from %ir.1 + 64, align 4)
; CHECK: [[LOAD4:%[0-9]+]]:_(s128) = G_LOAD [[GEP6]](p0) :: (load (s128) from %ir.1 + 64, align 4)
; CHECK: [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD4]](s128), [[GEP7]](p0) :: (store 16 into %ir.0 + 64, align 4)
; CHECK: G_STORE [[LOAD4]](s128), [[GEP7]](p0) :: (store (s128) into %ir.0 + 64, align 4)
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 80
; CHECK: [[GEP8:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C4]](s64)
; CHECK: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[GEP8]](p0) :: (load 16 from %ir.1 + 80, align 4)
; CHECK: [[LOAD5:%[0-9]+]]:_(s128) = G_LOAD [[GEP8]](p0) :: (load (s128) from %ir.1 + 80, align 4)
; CHECK: [[GEP9:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK: G_STORE [[LOAD5]](s128), [[GEP9]](p0) :: (store 16 into %ir.0 + 80, align 4)
; CHECK: G_STORE [[LOAD5]](s128), [[GEP9]](p0) :: (store (s128) into %ir.0 + 80, align 4)
; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
; CHECK: [[GEP10:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C5]](s64)
; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[GEP10]](p0) :: (load 16 from %ir.1 + 96, align 4)
; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[GEP10]](p0) :: (load (s128) from %ir.1 + 96, align 4)
; CHECK: [[GEP11:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK: G_STORE [[LOAD6]](s128), [[GEP11]](p0) :: (store 16 into %ir.0 + 96, align 4)
; CHECK: G_STORE [[LOAD6]](s128), [[GEP11]](p0) :: (store (s128) into %ir.0 + 96, align 4)
; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 112
; CHECK: [[GEP12:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C6]](s64)
; CHECK: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[GEP12]](p0) :: (load 16 from %ir.1 + 112, align 4)
; CHECK: [[LOAD7:%[0-9]+]]:_(s128) = G_LOAD [[GEP12]](p0) :: (load (s128) from %ir.1 + 112, align 4)
; CHECK: [[GEP13:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C6]](s64)
; CHECK: G_STORE [[LOAD7]](s128), [[GEP13]](p0) :: (store 16 into %ir.0 + 112, align 4)
; CHECK: G_STORE [[LOAD7]](s128), [[GEP13]](p0) :: (store (s128) into %ir.0 + 112, align 4)
; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 127
; CHECK: [[GEP14:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C7]](s64)
; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[GEP14]](p0) :: (load 16 from %ir.1 + 127, align 1, basealign 4)
; CHECK: [[LOAD8:%[0-9]+]]:_(s128) = G_LOAD [[GEP14]](p0) :: (load (s128) from %ir.1 + 127, align 1, basealign 4)
; CHECK: [[GEP15:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C7]](s64)
; CHECK: G_STORE [[LOAD8]](s128), [[GEP15]](p0) :: (store 16 into %ir.0 + 127, align 1, basealign 4)
; CHECK: G_STORE [[LOAD8]](s128), [[GEP15]](p0) :: (store (s128) into %ir.0 + 127, align 1, basealign 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 143
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...

@ -55,12 +55,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
; CHECK: G_MEMMOVE [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: G_MEMMOVE [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = COPY $x2
G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...
@ -76,25 +76,25 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP2]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD2]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 48
G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...
@ -111,12 +111,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
; CHECK: G_MEMMOVE [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: G_MEMMOVE [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 96
G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...
@ -132,31 +132,31 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.1 + 32, align 4)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD1]](p0) :: (load (s128) from %ir.1 + 32, align 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.1 + 48)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C2]](s64)
; CHECK: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD2]](p0) :: (load (s32) from %ir.1 + 48)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP3]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD3]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[GEP4]](p0) :: (store 16 into %ir.0 + 32, align 4)
; CHECK: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C4]](s64)
; CHECK: G_STORE [[LOAD2]](s128), [[PTR_ADD4]](p0) :: (store (s128) into %ir.0 + 32, align 4)
; CHECK: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK: G_STORE [[LOAD3]](s32), [[GEP5]](p0) :: (store 4 into %ir.0 + 48)
; CHECK: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C5]](s64)
; CHECK: G_STORE [[LOAD3]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %ir.0 + 48)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 52
G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMMOVE %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...

@ -69,14 +69,14 @@ body: |
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
; CHECK: G_MEMSET [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64), 1 :: (store 1 into %ir.dst)
; CHECK: G_MEMSET [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64), 1 :: (store (s8) into %ir.dst)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s32) = COPY $w2
%3:_(s8) = G_TRUNC %1(s32)
%4:_(s64) = G_ZEXT %2(s32)
G_MEMSET %0(p0), %3(s8), %4(s64), 1 :: (store 1 into %ir.dst)
G_MEMSET %0(p0), %3(s8), %4(s64), 1 :: (store (s8) into %ir.dst)
RET_ReallyLR
...
@ -96,16 +96,16 @@ body: |
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s8)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
; CHECK: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
; CHECK: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store 8 into %ir.dst + 8, align 1)
; CHECK: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%3:_(s64) = G_CONSTANT i64 16
%2:_(s8) = G_TRUNC %1(s32)
G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store (s8) into %ir.dst)
RET_ReallyLR
...
@ -122,22 +122,22 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store 16 into %ir.dst, align 1)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (s128) into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into %ir.dst + 16, align 1)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into %ir.dst + 16, align 1)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store 16 into %ir.dst + 32, align 1)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (s128) into %ir.dst + 32, align 1)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store 16 into %ir.dst + 48, align 1)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (s128) into %ir.dst + 48, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = G_CONSTANT i32 0
%3:_(s64) = G_CONSTANT i64 64
%2:_(s8) = G_TRUNC %1(s32)
G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store (s8) into %ir.dst)
RET_ReallyLR
...
@ -154,15 +154,15 @@ body: |
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store 8 into %ir.dst + 8, align 1)
; CHECK: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s8) = G_CONSTANT i8 64
%2:_(s64) = G_CONSTANT i64 16
G_MEMSET %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
G_MEMSET %0(p0), %1(s8), %2(s64), 1 :: (store (s8) into %ir.dst)
RET_ReallyLR
...
@ -183,22 +183,22 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL]](s64)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store 16 into %ir.dst, align 1)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[COPY]](p0) :: (store (s128) into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into %ir.dst + 16, align 1)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into %ir.dst + 16, align 1)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store 16 into %ir.dst + 32, align 1)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (s128) into %ir.dst + 32, align 1)
; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 44
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C3]](s64)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store 16 into %ir.dst + 44, align 1)
; CHECK: G_STORE [[BUILD_VECTOR]](<2 x s64>), [[PTR_ADD2]](p0) :: (store (s128) into %ir.dst + 44, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%3:_(s64) = G_CONSTANT i64 60
%2:_(s8) = G_TRUNC %1(s32)
G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store (s8) into %ir.dst)
RET_ReallyLR
...
@ -214,19 +214,19 @@ body: |
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4629771061636907072
; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
; CHECK: G_STORE [[C]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store 8 into %ir.dst + 8, align 1)
; CHECK: G_STORE [[C]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s64)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C2]](s64)
; CHECK: G_STORE [[TRUNC]](s16), [[PTR_ADD1]](p0) :: (store 2 into %ir.dst + 16, align 1)
; CHECK: G_STORE [[TRUNC]](s16), [[PTR_ADD1]](p0) :: (store (s16) into %ir.dst + 16, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s8) = G_CONSTANT i8 64
%2:_(s64) = G_CONSTANT i64 18
G_MEMSET %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
G_MEMSET %0(p0), %1(s8), %2(s64), 1 :: (store (s8) into %ir.dst)
RET_ReallyLR
...
@ -245,14 +245,14 @@ body: |
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s8)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 72340172838076673
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C]]
; CHECK: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store 8 into %ir.dst, align 1)
; CHECK: G_STORE [[MUL]](s64), [[COPY]](p0) :: (store (s64) into %ir.dst, align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store 8 into %ir.dst + 8, align 1)
; CHECK: G_STORE [[MUL]](s64), [[PTR_ADD]](p0) :: (store (s64) into %ir.dst + 8, align 1)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%3:_(s64) = G_CONSTANT i64 16
%2:_(s8) = G_TRUNC %1(s32)
G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
G_MEMSET %0(p0), %2(s8), %3(s64), 1 :: (store (s8) into %ir.dst)
RET_ReallyLR

@ -42,18 +42,18 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store 16 into %ir.0, align 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.1, align 4)
; CHECK: G_STORE [[LOAD]](s128), [[COPY]](p0) :: (store (s128) into %ir.0, align 4)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p0) :: (load 16 from %ir.1 + 16, align 4)
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[GEP1]](p0) :: (store 16 into %ir.0 + 16, align 4)
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from %ir.1 + 16, align 4)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK: G_STORE [[LOAD1]](s128), [[PTR_ADD1]](p0) :: (store (s128) into %ir.0 + 16, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 32
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...
@ -75,12 +75,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 36
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s64) = G_CONSTANT i64 36
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
G_MEMCPY %0(p0), %1(p0), %2(s64), 1 :: (store (s8) into %ir.0, align 4), (load (s8) from %ir.1, align 4)
RET_ReallyLR
...

@ -18,12 +18,12 @@ declare void @use_s128(i128 %a, i128 %b)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY $w5
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY $w6
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.2, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.2, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (s64) from %fixed-stack.1)
; CHECK: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64)
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load (s32) from %fixed-stack.0, align 16)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)

@ -7,7 +7,7 @@ define i32 @atomicrmw_volatile(i32* %ptr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (volatile load store monotonic 4 on %ir.ptr)
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (volatile load store monotonic (s32) on %ir.ptr)
; CHECK: $w0 = COPY [[ATOMICRMW_ADD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%oldval = atomicrmw volatile add i32* %ptr, i32 1 monotonic
@ -20,7 +20,7 @@ define i32 @atomicrmw_falkor(i32* %ptr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: ("aarch64-strided-access" load store monotonic 4 on %ir.ptr)
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: ("aarch64-strided-access" load store monotonic (s32) on %ir.ptr)
; CHECK: $w0 = COPY [[ATOMICRMW_ADD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%oldval = atomicrmw add i32* %ptr, i32 1 monotonic, !falkor.strided.access !0
@ -33,7 +33,7 @@ define i32 @atomicrmw_volatile_falkor(i32* %ptr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (volatile "aarch64-strided-access" load store monotonic 4 on %ir.ptr)
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (volatile "aarch64-strided-access" load store monotonic (s32) on %ir.ptr)
; CHECK: $w0 = COPY [[ATOMICRMW_ADD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%oldval = atomicrmw volatile add i32* %ptr, i32 1 monotonic, !falkor.strided.access !0
@ -47,7 +47,7 @@ define i32 @cmpxchg_volatile(i32* %addr) {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: (volatile load store monotonic monotonic 4 on %ir.addr)
; CHECK: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: (volatile load store monotonic monotonic (s32) on %ir.addr)
; CHECK: $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
; CHECK: RET_ReallyLR implicit $w0
%val_success = cmpxchg volatile i32* %addr, i32 0, i32 1 monotonic monotonic
@ -62,7 +62,7 @@ define i32 @cmpxchg_falkor(i32* %addr) {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: ("aarch64-strided-access" load store monotonic monotonic 4 on %ir.addr)
; CHECK: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: ("aarch64-strided-access" load store monotonic monotonic (s32) on %ir.addr)
; CHECK: $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
; CHECK: RET_ReallyLR implicit $w0
%val_success = cmpxchg i32* %addr, i32 0, i32 1 monotonic monotonic, !falkor.strided.access !0
@ -77,7 +77,7 @@ define i32 @cmpxchg_volatile_falkor(i32* %addr) {
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: (volatile "aarch64-strided-access" load store monotonic monotonic 4 on %ir.addr)
; CHECK: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p0), [[C]], [[C1]] :: (volatile "aarch64-strided-access" load store monotonic monotonic (s32) on %ir.addr)
; CHECK: $w0 = COPY [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
; CHECK: RET_ReallyLR implicit $w0
%val_success = cmpxchg volatile i32* %addr, i32 0, i32 1 monotonic monotonic, !falkor.strided.access !0

@ -101,7 +101,7 @@ continue:
; CHECK: [[BAD]].{{[a-z]+}} (landing-pad):
; CHECK: [[PHI_ELEVEN:%[0-9]+]]:_(s32) = G_PHI [[ELEVEN]](s32), %bb.1
; CHECK: EH_LABEL
; CHECK: G_STORE [[PHI_ELEVEN]](s32), {{%[0-9]+}}(p0) :: (store 4 into @global_var)
; CHECK: G_STORE [[PHI_ELEVEN]](s32), {{%[0-9]+}}(p0) :: (store (s32) into @global_var)
; CHECK: [[GOOD]].{{[a-z]+}}:
; CHECK: [[SEL:%[0-9]+]]:_(s32) = G_PHI

@ -6,7 +6,7 @@ define i32 @load_invariant(i32* %ptr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (invariant load 4 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (invariant load (s32) from %ir.ptr)
; CHECK: $w0 = COPY [[LOAD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%load = load i32, i32* %ptr, align 4, !invariant.load !0
@ -18,7 +18,7 @@ define i32 @load_volatile_invariant(i32* %ptr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (volatile invariant load 4 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (volatile invariant load (s32) from %ir.ptr)
; CHECK: $w0 = COPY [[LOAD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%load = load volatile i32, i32* %ptr, align 4, !invariant.load !0
@ -30,7 +30,7 @@ define i32 @load_dereferenceable(i32* dereferenceable(4) %ptr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (dereferenceable load 4 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (dereferenceable load (s32) from %ir.ptr)
; CHECK: $w0 = COPY [[LOAD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%load = load i32, i32* %ptr, align 4
@ -42,7 +42,7 @@ define i32 @load_dereferenceable_invariant(i32* dereferenceable(4) %ptr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (dereferenceable invariant load 4 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (dereferenceable invariant load (s32) from %ir.ptr)
; CHECK: $w0 = COPY [[LOAD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%load = load i32, i32* %ptr, align 4, !invariant.load !0
@ -54,7 +54,7 @@ define i32 @load_nontemporal(i32* %ptr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (non-temporal load 4 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (non-temporal load (s32) from %ir.ptr)
; CHECK: $w0 = COPY [[LOAD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%load = load i32, i32* %ptr, align 4, !nontemporal !0
@ -66,7 +66,7 @@ define i32 @load_falkor_strided_access(i32* %ptr) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: ("aarch64-strided-access" load 4 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: ("aarch64-strided-access" load (s32) from %ir.ptr)
; CHECK: $w0 = COPY [[LOAD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%load = load i32, i32* %ptr, align 4, !falkor.strided.access !0

@ -10,8 +10,8 @@ define void @local_escape() {
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.a
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.b
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store 4 into %ir.a)
; CHECK: G_STORE [[C1]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.b)
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %ir.a)
; CHECK: G_STORE [[C1]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.b)
; CHECK: RET_ReallyLR
%a = alloca i32
%b = alloca i32, i32 2
@ -31,8 +31,8 @@ define void @local_escape_insert_point() {
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.a
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.b
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store 4 into %ir.a)
; CHECK: G_STORE [[C1]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.b)
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %ir.a)
; CHECK: G_STORE [[C1]](s32), [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.b)
; CHECK: RET_ReallyLR
%a = alloca i32
%b = alloca i32, i32 2
@ -51,7 +51,7 @@ define void @local_escape_strip_ptr_cast() {
; CHECK: LOCAL_ESCAPE <mcsymbol .Llocal_escape_strip_ptr_cast$frame_escape_0>, %stack.0.a
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.a
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store 4 into %ir.cast)
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %ir.cast)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: $x0 = COPY [[FRAME_INDEX]](p0)
; CHECK: BL @foo, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0

@ -2,7 +2,7 @@
; CHECK-LABEL: name: store_max_address_space
; CHECK: %0:_(p16777215) = COPY $x0
; CHECK: G_STORE %1(s32), %0(p16777215) :: (store 4 into %ir.ptr, addrspace 16777215)
; CHECK: G_STORE %1(s32), %0(p16777215) :: (store (s32) into %ir.ptr, addrspace 16777215)
define void @store_max_address_space(i32 addrspace(16777215)* %ptr) {
store i32 0, i32 addrspace(16777215)* %ptr
ret void
@ -11,7 +11,7 @@ define void @store_max_address_space(i32 addrspace(16777215)* %ptr) {
; CHECK-LABEL: name: store_max_address_space_vector
; CHECK: %0:_(<2 x p16777215>) = COPY $q0
; CHECK: %1:_(p16777215) = G_EXTRACT_VECTOR_ELT %0(<2 x p16777215>), %2(s64)
; CHECK: %1(p16777215) :: (store 4 into %ir.elt0, addrspace 16777215)
; CHECK: %1(p16777215) :: (store (s32) into %ir.elt0, addrspace 16777215)
define void @store_max_address_space_vector(<2 x i32 addrspace(16777215)*> %vptr) {
%elt0 = extractelement <2 x i32 addrspace(16777215)*> %vptr, i32 0
store i32 0, i32 addrspace(16777215)* %elt0
@ -19,7 +19,7 @@ define void @store_max_address_space_vector(<2 x i32 addrspace(16777215)*> %vptr
}
; CHECK-LABEL: name: max_address_space_vector_max_num_elts
; CHECK: %0:_(<65535 x p16777215>) = G_LOAD %1(p0) :: (volatile load 524280 from `<65535 x i32 addrspace(16777215)*>* undef`, align 524288)
; CHECK: %0:_(<65535 x p16777215>) = G_LOAD %1(p0) :: (volatile load (<65535 x p16777215>) from `<65535 x i32 addrspace(16777215)*>* undef`, align 524288)
define void @max_address_space_vector_max_num_elts() {
%load = load volatile <65535 x i32 addrspace(16777215)*>, <65535 x i32 addrspace(16777215)*>* undef
ret void

@ -9,7 +9,7 @@ define void @copy(i8* %dst, i8* %src) {
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 0 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
; CHECK: RET_ReallyLR
entry:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 false)
@ -24,7 +24,7 @@ define void @inline_copy(i8* %dst, i8* %src) {
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
; CHECK: G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (store 1 into %ir.dst), (load 1 from %ir.src)
; CHECK: G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
; CHECK: RET_ReallyLR
entry:
call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 false)
@ -39,7 +39,7 @@ define void @copy_volatile(i8* %dst, i8* %src) {
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 0 :: (volatile store 1 into %ir.dst), (volatile load 1 from %ir.src)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 0 :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
; CHECK: RET_ReallyLR
entry:
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 true)
@ -54,7 +54,7 @@ define void @inline_copy_volatile(i8* %dst, i8* %src) {
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
; CHECK: G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (volatile store 1 into %ir.dst), (volatile load 1 from %ir.src)
; CHECK: G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
; CHECK: RET_ReallyLR
entry:
call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 true)
@ -69,7 +69,7 @@ define void @tail_copy(i8* %dst, i8* %src) {
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 1 :: (store 1 into %ir.dst), (load 1 from %ir.src)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 1 :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
; CHECK: RET_ReallyLR
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 false)
@ -84,7 +84,7 @@ define void @tail_inline_copy(i8* %dst, i8* %src) {
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
; CHECK: G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (store 1 into %ir.dst), (load 1 from %ir.src)
; CHECK: G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (store (s8) into %ir.dst), (load (s8) from %ir.src)
; CHECK: RET_ReallyLR
entry:
tail call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 false)
@ -99,7 +99,7 @@ define void @tail_copy_volatile(i8* %dst, i8* %src) {
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 1 :: (volatile store 1 into %ir.dst), (volatile load 1 from %ir.src)
; CHECK: G_MEMCPY [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64), 1 :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
; CHECK: RET_ReallyLR
entry:
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 true)
@ -114,7 +114,7 @@ define void @tail_inline_copy_volatile(i8* %dst, i8* %src) {
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
; CHECK: G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (volatile store 1 into %ir.dst), (volatile load 1 from %ir.src)
; CHECK: G_MEMCPY_INLINE [[COPY]](p0), [[COPY1]](p0), [[ZEXT]](s64) :: (volatile store (s8) into %ir.dst), (volatile load (s8) from %ir.src)
; CHECK: RET_ReallyLR
entry:
tail call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* %dst, i8* %src, i32 4, i1 true)

@ -17,7 +17,7 @@ define i3 @bug47619(i64 %arg, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %a
; CHECK: [[COPY6:%[0-9]+]]:_(s64) = COPY $x6
; CHECK: [[COPY7:%[0-9]+]]:_(s64) = COPY $x7
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s32) from %fixed-stack.0, align 16)
; CHECK: [[TRUNC:%[0-9]+]]:_(s3) = G_TRUNC [[LOAD]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s3)
; CHECK: $w0 = COPY [[ANYEXT]](s32)

@ -13,12 +13,12 @@ define void @stack_passed_i64(i64 %arg, i64 %arg1, i64 %arg2, i64 %arg3, i64 %ar
i64 %arg7, i64 %arg8, i64* byval(i64) %arg9) {
; CHECK: bb.1 (%ir-block.0):
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s64) from %fixed-stack.1, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; CHECK: [[COPY8:%[0-9]+]]:_(p0) = COPY [[FRAME_INDEX1]](p0)
; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY8]](p0) :: (dereferenceable load 8 from %ir.arg9)
; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY8]](p0) :: (dereferenceable load (s64) from %ir.arg9)
; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[LOAD1]], [[LOAD]]
; CHECK: G_STORE [[ADD]](s64), [[COPY8]](p0) :: (volatile store 8 into %ir.arg9)
; CHECK: G_STORE [[ADD]](s64), [[COPY8]](p0) :: (volatile store (s64) into %ir.arg9)
; CHECK: RET_ReallyLR
%load = load i64, i64* %arg9
%add = add i64 %load, %arg8

@ -27,12 +27,12 @@ define void @foo() ssp {
; CHECK-MIR: bb.1.entry:
; CHECK-MIR: %0:_(p0) = G_FRAME_INDEX %stack.0.StackGuardSlot
; CHECK-MIR-NEXT: %1:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
; CHECK-MIR-NEXT: %2:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
; CHECK-MIR-NEXT: G_STORE %2(p0), %0(p0) :: (volatile store 8 into %stack.0.StackGuardSlot)
; CHECK-MIR-NEXT: %1:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load (p0) from @__stack_chk_guard)
; CHECK-MIR-NEXT: %2:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load (p0) from @__stack_chk_guard)
; CHECK-MIR-NEXT: G_STORE %2(p0), %0(p0) :: (volatile store (p0) into %stack.0.StackGuardSlot)
; CHECK-MIR-NEXT: %3:_(p0) = G_FRAME_INDEX %stack.1.buf
; CHECK-MIR-NEXT: %4:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load 8 from @__stack_chk_guard)
; CHECK-MIR-NEXT: %5:_(p0) = G_LOAD %0(p0) :: (volatile dereferenceable load 8 from %ir.StackGuardSlot)
; CHECK-MIR-NEXT: %4:gpr64sp(p0) = LOAD_STACK_GUARD :: (dereferenceable invariant load (p0) from @__stack_chk_guard)
; CHECK-MIR-NEXT: %5:_(p0) = G_LOAD %0(p0) :: (volatile dereferenceable load (p0) from %ir.StackGuardSlot)
; CHECK-MIR-NEXT: %6:_(s1) = G_ICMP intpred(eq), %4(p0), %5
; CHECK-MIR-NEXT: G_BRCOND %6(s1), %bb.2
; CHECK-MIR-NEXT: G_BR %bb.3

@ -7,7 +7,7 @@ define void @store_nontemporal(i32* dereferenceable(4) %ptr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (non-temporal store 4 into %ir.ptr)
; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (non-temporal store (s32) into %ir.ptr)
; CHECK: RET_ReallyLR
store i32 0, i32* %ptr, align 4, !nontemporal !0
ret void
@ -19,7 +19,7 @@ define void @store_dereferenceable(i32* dereferenceable(4) %ptr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (store 4 into %ir.ptr)
; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (store (s32) into %ir.ptr)
; CHECK: RET_ReallyLR
store i32 0, i32* %ptr, align 4
ret void
@ -31,7 +31,7 @@ define void @store_volatile_dereferenceable(i32* dereferenceable(4) %ptr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (volatile store 4 into %ir.ptr)
; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: (volatile store (s32) into %ir.ptr)
; CHECK: RET_ReallyLR
store volatile i32 0, i32* %ptr, align 4
ret void
@ -43,7 +43,7 @@ define void @store_falkor_strided_access(i32* %ptr) {
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: ("aarch64-strided-access" store 4 into %ir.ptr)
; CHECK: G_STORE [[C]](s32), [[COPY]](p0) :: ("aarch64-strided-access" store (s32) into %ir.ptr)
; CHECK: RET_ReallyLR
store i32 0, i32* %ptr, align 4, !falkor.strided.access !0
ret void

@ -202,7 +202,7 @@ define void @bit_test_block_incomplete_phi() {
; CHECK: successors:
; CHECK: bb.3.if.end:
; CHECK: successors: %bb.4(0x80000000)
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[DEF1]](p0) :: (load 8 from `i8** undef`)
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[DEF1]](p0) :: (load (p0) from `i8** undef`)
; CHECK: bb.4.return:
; CHECK: [[PHI:%[0-9]+]]:_(s1) = G_PHI [[C]](s1), %bb.3, [[C1]](s1), %bb.5
; CHECK: RET_ReallyLR

@ -8,8 +8,8 @@ bb:
%tmp5 = getelementptr i16, i16* null, i64 2
%tmp6 = load i16, i16* %tmp1, align 2, !tbaa !0
store i16 %tmp6, i16* %tmp5, align 2, !tbaa !0
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD %{{[0-9]+}}(p0) :: (load 2 from %ir.tmp1, !tbaa !0)
; CHECK: G_STORE [[LOAD]](s16), %{{[0-9]+}}(p0) :: (store 2 into %ir.tmp5, !tbaa !0)
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD %{{[0-9]+}}(p0) :: (load (s16) from %ir.tmp1, !tbaa !0)
; CHECK: G_STORE [[LOAD]](s16), %{{[0-9]+}}(p0) :: (store (s16) into %ir.tmp5, !tbaa !0)
ret void
}

@ -18,8 +18,8 @@ body: |
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 13
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store 4)
; CHECK: G_STORE [[C1]](s32), [[FRAME_INDEX1]](p0) :: (store 4)
; CHECK: G_STORE [[C]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
; CHECK: G_STORE [[C1]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
; CHECK: RET_ReallyLR
LOCAL_ESCAPE <mcsymbol .Llocal_escape$frame_escape_0>, %stack.0
LOCAL_ESCAPE <mcsymbol .Llocal_escape$frame_escape_1>, %stack.1
@ -27,8 +27,8 @@ body: |
%3:_(s32) = G_CONSTANT i32 13
%0:_(p0) = G_FRAME_INDEX %stack.0
%1:_(p0) = G_FRAME_INDEX %stack.1
G_STORE %2(s32), %0(p0) :: (store 4)
G_STORE %3(s32), %1(p0) :: (store 4)
G_STORE %2(s32), %0(p0) :: (store (s32))
G_STORE %3(s32), %1(p0) :: (store (s32))
RET_ReallyLR
...

@ -19,12 +19,12 @@ body: |
; CHECK-LABEL: name: cmpxchg_i8
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s8) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic 1 on %ir.addr)
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s8) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s8) on %ir.addr)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ATOMICRMW_ADD]](s8)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_CONSTANT i8 1
%2:_(s8) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 1 on %ir.addr)
%2:_(s8) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s8) on %ir.addr)
%3:_(s32) = G_ANYEXT %2
$w0 = COPY %3(s32)
...
@ -38,12 +38,12 @@ body: |
; CHECK-LABEL: name: cmpxchg_i16
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s16) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic 2 on %ir.addr)
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s16) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s16) on %ir.addr)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ATOMICRMW_ADD]](s16)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
%0:_(p0) = COPY $x0
%1:_(s16) = G_CONSTANT i16 1
%2:_(s16) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 2 on %ir.addr)
%2:_(s16) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s16) on %ir.addr)
%3:_(s32) = G_ANYEXT %2
$w0 = COPY %3(s32)
...
@ -57,11 +57,11 @@ body: |
; CHECK-LABEL: name: cmpxchg_i32
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic 4 on %ir.addr)
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s32) on %ir.addr)
; CHECK: $w0 = COPY [[ATOMICRMW_ADD]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_CONSTANT i32 1
%2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 4 on %ir.addr)
%2:_(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s32) on %ir.addr)
$w0 = COPY %2(s32)
...
@ -74,10 +74,10 @@ body: |
; CHECK-LABEL: name: cmpxchg_i64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic 8 on %ir.addr)
; CHECK: [[ATOMICRMW_ADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_ADD [[COPY]](p0), [[C]] :: (load store monotonic (s64) on %ir.addr)
; CHECK: $x0 = COPY [[ATOMICRMW_ADD]](s64)
%0:_(p0) = COPY $x0
%1:_(s64) = G_CONSTANT i64 1
%2:_(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr)
%2:_(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic (s64) on %ir.addr)
$x0 = COPY %2(s64)
...

@ -28,14 +28,14 @@ body: |
; CHECK: [[BLOCK_ADDR:%[0-9]+]]:_(p0) = G_BLOCK_ADDR blockaddress(@test_blockaddress, %ir-block.block)
; CHECK: [[ADRP:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @addr
; CHECK: [[ADD_LOW:%[0-9]+]]:_(p0) = G_ADD_LOW [[ADRP]](p0), target-flags(aarch64-pageoff, aarch64-nc) @addr
; CHECK: G_STORE [[BLOCK_ADDR]](p0), [[ADD_LOW]](p0) :: (store 8 into @addr)
; CHECK: G_STORE [[BLOCK_ADDR]](p0), [[ADD_LOW]](p0) :: (store (p0) into @addr)
; CHECK: G_BRINDIRECT [[BLOCK_ADDR]](p0)
; CHECK: bb.1.block (address-taken):
; CHECK: RET_ReallyLR
bb.1 (%ir-block.0):
%0:_(p0) = G_BLOCK_ADDR blockaddress(@test_blockaddress, %ir-block.block)
%1:_(p0) = G_GLOBAL_VALUE @addr
G_STORE %0(p0), %1(p0) :: (store 8 into @addr)
G_STORE %0(p0), %1(p0) :: (store (p0) into @addr)
G_BRINDIRECT %0(p0)
bb.2.block (address-taken):

@ -22,7 +22,7 @@ body: |
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%width:_(s64) = COPY $x1
G_BZERO %ptr(p0), %width(s64), 0 :: (store 4)
G_BZERO %ptr(p0), %width(s64), 0 :: (store (s32))
RET_ReallyLR
...
@ -41,5 +41,5 @@ body: |
; CHECK: TCRETURNdi &bzero, 0, csr_darwin_aarch64_aapcs, implicit $sp, implicit $x0, implicit $x1
%ptr:_(p0) = COPY $x0
%width:_(s64) = COPY $x1
G_BZERO %ptr(p0), %width(s64), 1 :: (store 4)
G_BZERO %ptr(p0), %width(s64), 1 :: (store (s32))
RET_ReallyLR

@ -39,10 +39,10 @@ body: |
; CHECK-NOLSE: [[COPY6:%[0-9]+]]:gpr64(s64) = COPY [[COPY2]](s64)
; CHECK-NOLSE: [[COPY7:%[0-9]+]]:gpr64(s64) = COPY [[COPY3]](s64)
; CHECK-NOLSE: [[COPY8:%[0-9]+]]:gpr64(s64) = COPY [[COPY4]](s64)
; CHECK-NOLSE: early-clobber %13:gpr64(s64), early-clobber %14:gpr64(s64), early-clobber %16:gpr32 = CMP_SWAP_128 [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire 16)
; CHECK-NOLSE: early-clobber %13:gpr64(s64), early-clobber %14:gpr64(s64), early-clobber %16:gpr32 = CMP_SWAP_128 [[COPY]](p0), [[COPY5]](s64), [[COPY6]](s64), [[COPY7]](s64), [[COPY8]](s64) :: (load store acquire acquire (s128))
; CHECK-NOLSE: [[COPY9:%[0-9]+]]:gpr64 = COPY %16
; CHECK-NOLSE: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES %13(s64), %14(s64)
; CHECK-NOLSE: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16)
; CHECK-NOLSE: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
; CHECK-NOLSE: RET_ReallyLR
; CHECK-LSE-LABEL: name: compare_swap_128
; CHECK-LSE: liveins: $x0_x1, $x1, $x0, $x1, $x2, $x3, $x4
@ -53,11 +53,11 @@ body: |
; CHECK-LSE: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
; CHECK-LSE: [[REG_SEQUENCE:%[0-9]+]]:xseqpairsclass(s128) = REG_SEQUENCE [[COPY1]](s64), %subreg.sube64, [[COPY2]](s64), %subreg.subo64
; CHECK-LSE: [[REG_SEQUENCE1:%[0-9]+]]:xseqpairsclass(s128) = REG_SEQUENCE [[COPY3]](s64), %subreg.sube64, [[COPY4]](s64), %subreg.subo64
; CHECK-LSE: [[CASPAX:%[0-9]+]]:xseqpairsclass(s128) = CASPAX [[REG_SEQUENCE]](s128), [[REG_SEQUENCE1]](s128), [[COPY]](p0) :: (load store acquire acquire 16)
; CHECK-LSE: [[CASPAX:%[0-9]+]]:xseqpairsclass(s128) = CASPAX [[REG_SEQUENCE]](s128), [[REG_SEQUENCE1]](s128), [[COPY]](p0) :: (load store acquire acquire (s128))
; CHECK-LSE: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 0
; CHECK-LSE: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[CASPAX]](s128), 64
; CHECK-LSE: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[EXTRACT]](s64), [[EXTRACT1]](s64)
; CHECK-LSE: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16)
; CHECK-LSE: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
; CHECK-LSE: RET_ReallyLR
%0:_(p0) = COPY $x0
%3:_(s64) = COPY $x1
@ -66,8 +66,8 @@ body: |
%5:_(s64) = COPY $x3
%6:_(s64) = COPY $x4
%2:_(s128) = G_MERGE_VALUES %5(s64), %6(s64)
%7:_(s128), %8:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0(p0), %1, %2 :: (load store acquire acquire 16)
G_STORE %7(s128), %0(p0) :: (store 16)
%7:_(s128), %8:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0(p0), %1, %2 :: (load store acquire acquire (s128))
G_STORE %7(s128), %0(p0) :: (store (s128))
RET_ReallyLR
...

@ -19,7 +19,7 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[CMP:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[CST:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
; CHECK: [[RES:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic (s64) on %ir.addr)
; CHECK: [[SRES:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[RES]](s32), [[CMP]]
; CHECK: [[SRES32:%[0-9]+]]:_(s32) = COPY [[SRES]]
; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[RES]], [[SRES32]]
@ -27,7 +27,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(s32) = G_CONSTANT i32 0
%2:_(s32) = G_CONSTANT i32 1
%3:_(s32), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
%3:_(s32), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic (s64) on %ir.addr)
%5:_(s32) = G_ANYEXT %4
%6:_(s32) = G_MUL %3, %5
$w0 = COPY %6(s32)
@ -44,7 +44,7 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[CMP:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[CST:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic 8 on %ir.addr)
; CHECK: [[RES:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[CMP]], [[CST]] :: (load store monotonic (s64) on %ir.addr)
; CHECK: [[SRES:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[RES]](s64), [[CMP]]
; CHECK: [[SRES64:%[0-9]+]]:_(s64) = G_ANYEXT [[SRES]]
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[RES]], [[SRES64]]
@ -52,7 +52,7 @@ body: |
%0:_(p0) = COPY $x0
%1:_(s64) = G_CONSTANT i64 0
%2:_(s64) = G_CONSTANT i64 1
%3:_(s64), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
%3:_(s64), %4:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %0, %1, %2 :: (load store monotonic (s64) on %ir.addr)
%5:_(s64) = G_ANYEXT %4
%6:_(s64) = G_MUL %3, %5
$x0 = COPY %6(s64)

@ -20,13 +20,13 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s8) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic 1 on %ir.addr)
; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s8) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s8) on %ir.addr)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ATOMIC_CMPXCHG]](s8)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_CONSTANT i8 0
%2:_(s8) = G_CONSTANT i8 1
%3:_(s8) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 1 on %ir.addr)
%3:_(s8) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic (s8) on %ir.addr)
%4:_(s32) = G_ANYEXT %3
$w0 = COPY %4(s32)
...
@ -41,13 +41,13 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s16) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic 2 on %ir.addr)
; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s16) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s16) on %ir.addr)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ATOMIC_CMPXCHG]](s16)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
%0:_(p0) = COPY $x0
%1:_(s16) = G_CONSTANT i16 0
%2:_(s16) = G_CONSTANT i16 1
%3:_(s16) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 2 on %ir.addr)
%3:_(s16) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic (s16) on %ir.addr)
%4:_(s32) = G_ANYEXT %3
$w0 = COPY %4(s32)
...
@ -62,12 +62,12 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic 4 on %ir.addr)
; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s32) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s32) on %ir.addr)
; CHECK: $w0 = COPY [[ATOMIC_CMPXCHG]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_CONSTANT i32 0
%2:_(s32) = G_CONSTANT i32 1
%3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 4 on %ir.addr)
%3:_(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic (s32) on %ir.addr)
$w0 = COPY %3(s32)
...
@@ -81,11 +81,11 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic 8 on %ir.addr)
; CHECK: [[ATOMIC_CMPXCHG:%[0-9]+]]:_(s64) = G_ATOMIC_CMPXCHG [[COPY]](p0), [[C]], [[C1]] :: (load store monotonic (s64) on %ir.addr)
; CHECK: $x0 = COPY [[ATOMIC_CMPXCHG]](s64)
%0:_(p0) = COPY $x0
%1:_(s64) = G_CONSTANT i64 0
%2:_(s64) = G_CONSTANT i64 1
%3:_(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
%3:_(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic (s64) on %ir.addr)
$x0 = COPY %3(s64)
...

@@ -18,8 +18,8 @@ declare void @_Unwind_Resume(i8*)
; CHECK: [[PTR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEL_PTR:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[SEL_PTR_INT:%[0-9]+]]:_(s32) = G_PTRTOINT [[SEL_PTR]](p0)
; CHECK: G_STORE [[PTR]](p0), %0(p0) :: (store 8 into %ir.exn.slot)
; CHECK: G_STORE [[SEL_PTR_INT]](s32), %1(p0) :: (store 4 into %ir.ehselector.slot)
; CHECK: G_STORE [[PTR]](p0), %0(p0) :: (store (p0) into %ir.exn.slot)
; CHECK: G_STORE [[SEL_PTR_INT]](s32), %1(p0) :: (store (s32) into %ir.ehselector.slot)
define void @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
%exn.slot = alloca i8*

@@ -53,10 +53,10 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_extload
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[LOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_LOAD %0 :: (load 1)
%1:_(s32) = G_LOAD %0 :: (load (s8))
$w0 = COPY %1
...
---
@@ -67,11 +67,11 @@ body: |
; CHECK-LABEL: name: sext_i32_i64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load 4 from %ir.ptr)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
; CHECK: $x0 = COPY [[SEXTLOAD]](s64)
; CHECK: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%2:_(s64) = G_SEXTLOAD %0(p0) :: (load 4 from %ir.ptr)
%2:_(s64) = G_SEXTLOAD %0(p0) :: (load (s32) from %ir.ptr)
$x0 = COPY %2(s64)
RET_ReallyLR implicit $x0
@@ -84,11 +84,11 @@ body: |
; CHECK-LABEL: name: sext_i16_i64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load 2 from %ir.ptr)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s16) from %ir.ptr)
; CHECK: $x0 = COPY [[SEXTLOAD]](s64)
; CHECK: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%2:_(s64) = G_SEXTLOAD %0(p0) :: (load 2 from %ir.ptr)
%2:_(s64) = G_SEXTLOAD %0(p0) :: (load (s16) from %ir.ptr)
$x0 = COPY %2(s64)
RET_ReallyLR implicit $x0
@@ -101,11 +101,11 @@ body: |
; CHECK-LABEL: name: sext_i8_i64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load 1 from %ir.ptr)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s8) from %ir.ptr)
; CHECK: $x0 = COPY [[SEXTLOAD]](s64)
; CHECK: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%2:_(s64) = G_SEXTLOAD %0(p0) :: (load 1 from %ir.ptr)
%2:_(s64) = G_SEXTLOAD %0(p0) :: (load (s8) from %ir.ptr)
$x0 = COPY %2(s64)
RET_ReallyLR implicit $x0
@@ -118,11 +118,11 @@ body: |
; CHECK-LABEL: name: zext_i32_i64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load 4 from %ir.ptr)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s32) from %ir.ptr)
; CHECK: $x0 = COPY [[ZEXTLOAD]](s64)
; CHECK: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%2:_(s64) = G_ZEXTLOAD %0(p0) :: (load 4 from %ir.ptr)
%2:_(s64) = G_ZEXTLOAD %0(p0) :: (load (s32) from %ir.ptr)
$x0 = COPY %2(s64)
RET_ReallyLR implicit $x0
@@ -135,11 +135,11 @@ body: |
; CHECK-LABEL: name: zext_i16_i64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load 2 from %ir.ptr)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16) from %ir.ptr)
; CHECK: $x0 = COPY [[ZEXTLOAD]](s64)
; CHECK: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%2:_(s64) = G_ZEXTLOAD %0(p0) :: (load 2 from %ir.ptr)
%2:_(s64) = G_ZEXTLOAD %0(p0) :: (load (s16) from %ir.ptr)
$x0 = COPY %2(s64)
RET_ReallyLR implicit $x0
@@ -152,11 +152,11 @@ body: |
; CHECK-LABEL: name: zext_i8_i64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load 1 from %ir.ptr)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s64) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8) from %ir.ptr)
; CHECK: $x0 = COPY [[ZEXTLOAD]](s64)
; CHECK: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%2:_(s64) = G_ZEXTLOAD %0(p0) :: (load 1 from %ir.ptr)
%2:_(s64) = G_ZEXTLOAD %0(p0) :: (load (s8) from %ir.ptr)
$x0 = COPY %2(s64)
RET_ReallyLR implicit $x0

@@ -179,16 +179,16 @@ body: |
; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $q1
; CHECK: %idx:_(s64) = COPY $x0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; CHECK: G_STORE [[COPY]](<2 x s64>), [[FRAME_INDEX]](p0) :: (store 16 into %stack.0, align 32)
; CHECK: G_STORE [[COPY]](<2 x s64>), [[FRAME_INDEX]](p0) :: (store (s128) into %stack.0, align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; CHECK: G_STORE [[COPY1]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into %stack.0 + 16, basealign 32)
; CHECK: G_STORE [[COPY1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into %stack.0 + 16, basealign 32)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 3
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %idx, [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND]], [[C2]]
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[MUL]](s64)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load 8)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[PTR_ADD1]](p0) :: (load (s64))
; CHECK: $x0 = COPY [[LOAD]](s64)
; CHECK: RET_ReallyLR
%0:_(<2 x s64>) = COPY $q0

@@ -19,15 +19,15 @@ body: |
; CHECK: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
; CHECK: [[FPEXT:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV]](<2 x s32>)
; CHECK: [[FPEXT1:%[0-9]+]]:_(<2 x s64>) = G_FPEXT [[UV1]](<2 x s32>)
; CHECK: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store 16, align 32)
; CHECK: G_STORE [[FPEXT]](<2 x s64>), [[COPY1]](p0) :: (store (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
; CHECK: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[FPEXT1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%0:_(<4 x s32>) = COPY $q0
%1:_(p0) = COPY $x0
%2:_(<4 x s64>) = G_FPEXT %0(<4 x s32>)
G_STORE %2(<4 x s64>), %1(p0) :: (store 32)
G_STORE %2(<4 x s64>), %1(p0) :: (store (<4 x s64>))
RET_ReallyLR
...

@@ -117,10 +117,10 @@ body: |
; CHECK: [[COPY5:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[FPTRUNC]](<2 x s32>), [[FPTRUNC1]](<2 x s32>)
; CHECK: [[CONCAT_VECTORS1:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[FPTRUNC2]](<2 x s32>), [[FPTRUNC3]](<2 x s32>)
; CHECK: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY5]](p0) :: (store 16, align 32)
; CHECK: G_STORE [[CONCAT_VECTORS]](<4 x s32>), [[COPY5]](p0) :: (store (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C]](s64)
; CHECK: G_STORE [[CONCAT_VECTORS1]](<4 x s32>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[CONCAT_VECTORS1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%2:_(<2 x s64>) = COPY $q0
%3:_(<2 x s64>) = COPY $q1
@@ -130,6 +130,6 @@ body: |
%1:_(p0) = COPY $x0
%6:_(<8 x s32>) = G_FPTRUNC %0(<8 x s64>)
%7:_(p0) = COPY $x0
G_STORE %6(<8 x s32>), %7(p0) :: (store 32)
G_STORE %6(<8 x s32>), %7(p0) :: (store (<8 x s32>))
RET_ReallyLR
...

@@ -12,12 +12,12 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: G_STORE [[LOAD]](<4 x s32>), [[COPY1]](p0) :: (store 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; CHECK: G_STORE [[LOAD]](<4 x s32>), [[COPY1]](p0) :: (store (<4 x s32>))
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16)
G_STORE %2(<4 x s32>), %1(p0) :: (store 16)
%2:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>))
G_STORE %2(<4 x s32>), %1(p0) :: (store (<4 x s32>))
...
---
@@ -32,11 +32,11 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: G_STORE [[LOAD]](<2 x s64>), [[COPY1]](p0) :: (store 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>))
; CHECK: G_STORE [[LOAD]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x s64>))
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16)
G_STORE %2(<2 x s64>), %1(p0) :: (store 16)
%2:_(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>))
G_STORE %2(<2 x s64>), %1(p0) :: (store (<2 x s64>))
...

@@ -46,7 +46,7 @@ body: |
; CHECK: RET_ReallyLR debug-location !DILocation(line: 5, column: 1
%0:_(<2 x p0>) = COPY $q0
%1:_(p0) = COPY $x0
G_STORE %0(<2 x p0>), %1(p0), debug-location !11 :: (store 16)
G_STORE %0(<2 x p0>), %1(p0), debug-location !11 :: (store (<2 x p0>))
RET_ReallyLR debug-location !12
...

@@ -32,11 +32,11 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(<2 x p0>) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s64>) = G_BITCAST [[COPY]](<2 x p0>)
; CHECK: G_STORE [[BITCAST]](<2 x s64>), [[COPY1]](p0) :: (store 16 into %ir.ptr)
; CHECK: G_STORE [[BITCAST]](<2 x s64>), [[COPY1]](p0) :: (store (<2 x p0>) into %ir.ptr)
; CHECK: RET_ReallyLR
%0:_(<2 x p0>) = COPY $q0
%1:_(p0) = COPY $x0
G_STORE %0(<2 x p0>), %1(p0) :: (store 16 into %ir.ptr)
G_STORE %0(<2 x p0>), %1(p0) :: (store (<2 x p0>) into %ir.ptr)
RET_ReallyLR
...
@@ -52,12 +52,12 @@ body: |
; CHECK-LABEL: name: load_v2p0
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x p0>) from %ir.ptr)
; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>)
; CHECK: $q0 = COPY [[BITCAST]](<2 x p0>)
; CHECK: RET_ReallyLR implicit $q0
%0:_(p0) = COPY $x0
%1:_(<2 x p0>) = G_LOAD %0(p0) :: (load 16 from %ir.ptr)
%1:_(<2 x p0>) = G_LOAD %0(p0) :: (load (<2 x p0>) from %ir.ptr)
$q0 = COPY %1(<2 x p0>)
RET_ReallyLR implicit $q0
@@ -76,11 +76,11 @@ body: |
; CHECK-LABEL: name: load_v2p1
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x p1>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x p1>) = G_LOAD [[COPY]](p0) :: (load (<2 x p1>) from %ir.ptr)
; CHECK: $q0 = COPY [[LOAD]](<2 x p1>)
; CHECK: RET_ReallyLR implicit $q0
%0:_(p0) = COPY $x0
%1:_(<2 x p1>) = G_LOAD %0(p0) :: (load 16 from %ir.ptr)
%1:_(<2 x p1>) = G_LOAD %0(p0) :: (load (<2 x p1>) from %ir.ptr)
$q0 = COPY %1(<2 x p1>)
RET_ReallyLR implicit $q0

@@ -8,48 +8,48 @@ body: |
; CHECK-LABEL: name: test_load
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s1))
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load 1)
; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load (s8))
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s8)
; CHECK: $w0 = COPY [[ANYEXT1]](s32)
; CHECK: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load 2)
; CHECK: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load (s16))
; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD2]](s16)
; CHECK: $w0 = COPY [[ANYEXT2]](s32)
; CHECK: $w0 = COPY [[ANYEXT1]](s32)
; CHECK: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
; CHECK: $x0 = COPY [[LOAD3]](s64)
; CHECK: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load (p0))
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[LOAD4]](p0)
; CHECK: $x0 = COPY [[PTRTOINT]](s64)
; CHECK: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[LOAD5:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<2 x s32>))
; CHECK: [[BITCAST:%[0-9]+]]:_(s64) = G_BITCAST [[LOAD5]](<2 x s32>)
; CHECK: $x0 = COPY [[BITCAST]](s64)
; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD6:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128))
; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[LOAD6]](s128)
; CHECK: $x0 = COPY [[TRUNC]](s64)
%0:_(p0) = COPY $x0
%1:_(s1) = G_LOAD %0(p0) :: (load 1)
%1:_(s1) = G_LOAD %0(p0) :: (load (s1))
%2:_(s32) = G_ANYEXT %1(s1)
$w0 = COPY %2(s32)
%3:_(s8) = G_LOAD %0(p0) :: (load 1)
%3:_(s8) = G_LOAD %0(p0) :: (load (s8))
%4:_(s32) = G_ANYEXT %3(s8)
$w0 = COPY %4(s32)
%5:_(s16) = G_LOAD %0(p0) :: (load 2)
%5:_(s16) = G_LOAD %0(p0) :: (load (s16))
%6:_(s32) = G_ANYEXT %5(s16)
$w0 = COPY %6(s32)
%7:_(s32) = G_LOAD %0(p0) :: (load 4)
%7:_(s32) = G_LOAD %0(p0) :: (load (s32))
$w0 = COPY %4(s32)
%8:_(s64) = G_LOAD %0(p0) :: (load 8)
%8:_(s64) = G_LOAD %0(p0) :: (load (s64))
$x0 = COPY %8(s64)
%9:_(p0) = G_LOAD %0(p0) :: (load 8)
%9:_(p0) = G_LOAD %0(p0) :: (load (p0))
%10:_(s64) = G_PTRTOINT %9(p0)
$x0 = COPY %10(s64)
%11:_(<2 x s32>) = G_LOAD %0(p0) :: (load 8)
%11:_(<2 x s32>) = G_LOAD %0(p0) :: (load (<2 x s32>))
%12:_(s64) = G_BITCAST %11(<2 x s32>)
$x0 = COPY %12(s64)
%13:_(s128) = G_LOAD %0(p0) :: (load 16)
%13:_(s128) = G_LOAD %0(p0) :: (load (s128))
%14:_(s64) = G_TRUNC %13(s128)
$x0 = COPY %14(s64)
...
@@ -67,33 +67,33 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[AND]](s32)
; CHECK: G_STORE [[TRUNC]](s8), [[COPY]](p0) :: (store 1)
; CHECK: G_STORE [[TRUNC]](s8), [[COPY]](p0) :: (store (s1))
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
; CHECK: G_STORE [[TRUNC1]](s8), [[COPY]](p0) :: (store 1)
; CHECK: G_STORE [[TRUNC1]](s8), [[COPY]](p0) :: (store (s8))
; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; CHECK: G_STORE [[TRUNC2]](s16), [[COPY]](p0) :: (store 2)
; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 4)
; CHECK: G_STORE [[TRUNC2]](s16), [[COPY]](p0) :: (store (s16))
; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store (s32))
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY]](p0)
; CHECK: G_STORE [[PTRTOINT]](s64), [[COPY]](p0) :: (store 8)
; CHECK: G_STORE [[COPY]](p0), [[COPY]](p0) :: (store 8)
; CHECK: G_STORE [[PTRTOINT]](s64), [[COPY]](p0) :: (store (s64))
; CHECK: G_STORE [[COPY]](p0), [[COPY]](p0) :: (store (p0))
; CHECK: [[PTRTOINT1:%[0-9]+]]:_(s64) = G_PTRTOINT [[COPY]](p0)
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[PTRTOINT1]](s64), [[PTRTOINT1]](s64)
; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16)
; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128))
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s1) = G_TRUNC %1(s32)
G_STORE %2(s1), %0(p0) :: (store 1)
G_STORE %2(s1), %0(p0) :: (store (s1))
%3:_(s8) = G_TRUNC %1(s32)
G_STORE %3(s8), %0(p0) :: (store 1)
G_STORE %3(s8), %0(p0) :: (store (s8))
%4:_(s16) = G_TRUNC %1(s32)
G_STORE %4(s16), %0(p0) :: (store 2)
G_STORE %1(s32), %0(p0) :: (store 4)
G_STORE %4(s16), %0(p0) :: (store (s16))
G_STORE %1(s32), %0(p0) :: (store (s32))
%5:_(s64) = G_PTRTOINT %0(p0)
G_STORE %5(s64), %0(p0) :: (store 8)
G_STORE %0(p0), %0(p0) :: (store 8)
G_STORE %5(s64), %0(p0) :: (store (s64))
G_STORE %0(p0), %0(p0) :: (store (p0))
%6:_(s64) = G_PTRTOINT %0(p0)
%7:_(s128) = G_MERGE_VALUES %6(s64), %6
G_STORE %7(s128), %0(p0) :: (store 16)
G_STORE %7(s128), %0(p0) :: (store (s128))
...
---
name: store_4xi16
@@ -108,11 +108,11 @@ body: |
; CHECK: liveins: $d0, $x0
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: G_STORE [[COPY]](<4 x s16>), [[COPY1]](p0) :: (store 8)
; CHECK: G_STORE [[COPY]](<4 x s16>), [[COPY1]](p0) :: (store (<4 x s16>))
; CHECK: RET_ReallyLR
%0:_(<4 x s16>) = COPY $d0
%1:_(p0) = COPY $x0
G_STORE %0(<4 x s16>), %1(p0) :: (store 8)
G_STORE %0(<4 x s16>), %1(p0) :: (store (<4 x s16>))
RET_ReallyLR
...
@@ -129,11 +129,11 @@ body: |
; CHECK: liveins: $q0, $x0
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: G_STORE [[COPY]](<4 x s32>), [[COPY1]](p0) :: (store 16)
; CHECK: G_STORE [[COPY]](<4 x s32>), [[COPY1]](p0) :: (store (<4 x s32>))
; CHECK: RET_ReallyLR
%0:_(<4 x s32>) = COPY $q0
%1:_(p0) = COPY $x0
G_STORE %0(<4 x s32>), %1(p0) :: (store 16)
G_STORE %0(<4 x s32>), %1(p0) :: (store (<4 x s32>))
RET_ReallyLR
...
@@ -150,11 +150,11 @@ body: |
; CHECK: liveins: $q0, $x0
; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: G_STORE [[COPY]](<8 x s16>), [[COPY1]](p0) :: (store 16)
; CHECK: G_STORE [[COPY]](<8 x s16>), [[COPY1]](p0) :: (store (<8 x s16>))
; CHECK: RET_ReallyLR
%0:_(<8 x s16>) = COPY $q0
%1:_(p0) = COPY $x0
G_STORE %0(<8 x s16>), %1(p0) :: (store 16)
G_STORE %0(<8 x s16>), %1(p0) :: (store (<8 x s16>))
RET_ReallyLR
...
@@ -171,11 +171,11 @@ body: |
; CHECK: liveins: $q0, $x0
; CHECK: [[COPY:%[0-9]+]]:_(<16 x s8>) = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: G_STORE [[COPY]](<16 x s8>), [[COPY1]](p0) :: (store 16)
; CHECK: G_STORE [[COPY]](<16 x s8>), [[COPY1]](p0) :: (store (<16 x s8>))
; CHECK: RET_ReallyLR
%0:_(<16 x s8>) = COPY $q0
%1:_(p0) = COPY $x0
G_STORE %0(<16 x s8>), %1(p0) :: (store 16)
G_STORE %0(<16 x s8>), %1(p0) :: (store (<16 x s8>))
RET_ReallyLR
...
@@ -191,11 +191,11 @@ body: |
; CHECK-LABEL: name: load_4xi16
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<4 x s16>))
; CHECK: $d0 = COPY [[LOAD]](<4 x s16>)
; CHECK: RET_ReallyLR implicit $d0
%0:_(p0) = COPY $x0
%1:_(<4 x s16>) = G_LOAD %0(p0) :: (load 8)
%1:_(<4 x s16>) = G_LOAD %0(p0) :: (load (<4 x s16>))
$d0 = COPY %1(<4 x s16>)
RET_ReallyLR implicit $d0
@@ -212,11 +212,11 @@ body: |
; CHECK-LABEL: name: load_4xi32
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; CHECK: $q0 = COPY [[LOAD]](<4 x s32>)
; CHECK: RET_ReallyLR implicit $q0
%0:_(p0) = COPY $x0
%1:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16)
%1:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>))
$q0 = COPY %1(<4 x s32>)
RET_ReallyLR implicit $q0
@@ -233,11 +233,11 @@ body: |
; CHECK-LABEL: name: load_8xi16
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<8 x s16>))
; CHECK: $q0 = COPY [[LOAD]](<8 x s16>)
; CHECK: RET_ReallyLR implicit $q0
%0:_(p0) = COPY $x0
%1:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16)
%1:_(<8 x s16>) = G_LOAD %0(p0) :: (load (<8 x s16>))
$q0 = COPY %1(<8 x s16>)
RET_ReallyLR implicit $q0
@@ -254,11 +254,11 @@ body: |
; CHECK-LABEL: name: load_16xi8
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<16 x s8>))
; CHECK: $q0 = COPY [[LOAD]](<16 x s8>)
; CHECK: RET_ReallyLR implicit $q0
%0:_(p0) = COPY $x0
%1:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16)
%1:_(<16 x s8>) = G_LOAD %0(p0) :: (load (<16 x s8>))
$q0 = COPY %1(<16 x s8>)
RET_ReallyLR implicit $q0
@@ -274,11 +274,11 @@ body: |
; CHECK-LABEL: name: load_8xi8
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<8 x s8>))
; CHECK: $d0 = COPY [[LOAD]](<8 x s8>)
; CHECK: RET_ReallyLR implicit $d0
%0:_(p0) = COPY $x0
%1:_(<8 x s8>) = G_LOAD %0(p0) :: (load 8)
%1:_(<8 x s8>) = G_LOAD %0(p0) :: (load (<8 x s8>))
$d0 = COPY %1(<8 x s8>)
RET_ReallyLR implicit $d0
@@ -295,11 +295,11 @@ body: |
; CHECK: liveins: $x0, $d0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(<8 x s8>) = COPY $d0
; CHECK: G_STORE [[COPY1]](<8 x s8>), [[COPY]](p0) :: (store 8)
; CHECK: G_STORE [[COPY1]](<8 x s8>), [[COPY]](p0) :: (store (<8 x s8>))
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(<8 x s8>) = COPY $d0
G_STORE %1(<8 x s8>), %0(p0) :: (store 8)
G_STORE %1(<8 x s8>), %0(p0) :: (store (<8 x s8>))
RET_ReallyLR
...
---
@@ -316,14 +316,14 @@ body: |
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s8>), %ptr(p0) :: (store 16, align 32)
; CHECK: G_STORE [[BUILD_VECTOR]](<16 x s8>), %ptr(p0) :: (store (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[BUILD_VECTOR1]](<16 x s8>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[BUILD_VECTOR1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%val:_(<32 x s8>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
G_STORE %val(<32 x s8>), %ptr(p0) :: (store 32)
G_STORE %val(<32 x s8>), %ptr(p0) :: (store (<32 x s8>))
RET_ReallyLR
...
---
@@ -340,14 +340,14 @@ body: |
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s16>), %ptr(p0) :: (store 16, align 32)
; CHECK: G_STORE [[BUILD_VECTOR]](<8 x s16>), %ptr(p0) :: (store (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[BUILD_VECTOR1]](<8 x s16>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[BUILD_VECTOR1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%val:_(<16 x s16>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
G_STORE %val(<16 x s16>), %ptr(p0) :: (store 32)
G_STORE %val(<16 x s16>), %ptr(p0) :: (store (<16 x s16>))
RET_ReallyLR
...
---
@@ -364,14 +364,14 @@ body: |
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), %ptr(p0) :: (store 16, align 32)
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), %ptr(p0) :: (store (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%val:_(<8 x s32>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
G_STORE %val(<8 x s32>), %ptr(p0) :: (store 32)
G_STORE %val(<8 x s32>), %ptr(p0) :: (store (<8 x s32>))
RET_ReallyLR
...
---
@@ -386,14 +386,14 @@ body: |
; CHECK: liveins: $x0
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store 16, align 32)
; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%val:_(<4 x s64>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
G_STORE %val(<4 x s64>), %ptr(p0) :: (store 32)
G_STORE %val(<4 x s64>), %ptr(p0) :: (store (<4 x s64>))
RET_ReallyLR
...
---
@@ -407,17 +407,17 @@ body: |
; CHECK-LABEL: name: load_32xs8
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD %ptr(p0) :: (load 16, align 32)
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD %ptr(p0) :: (load (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from unknown-address + 16)
; CHECK: G_STORE [[LOAD]](<16 x s8>), %ptr(p0) :: (store 16, align 32)
; CHECK: [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
; CHECK: G_STORE [[LOAD]](<16 x s8>), %ptr(p0) :: (store (s128), align 32)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[LOAD1]](<16 x s8>), [[PTR_ADD1]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[LOAD1]](<16 x s8>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<32 x s8>) = G_LOAD %ptr(p0) :: (load 32)
G_STORE %val(<32 x s8>), %ptr(p0) :: (store 32)
%val:_(<32 x s8>) = G_LOAD %ptr(p0) :: (load (<32 x s8>))
G_STORE %val(<32 x s8>), %ptr(p0) :: (store (<32 x s8>))
RET_ReallyLR
...
---
@@ -431,17 +431,17 @@ body: |
; CHECK-LABEL: name: load_16xs16
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD %ptr(p0) :: (load 16, align 32)
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD %ptr(p0) :: (load (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from unknown-address + 16)
; CHECK: G_STORE [[LOAD]](<8 x s16>), %ptr(p0) :: (store 16, align 32)
; CHECK: [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
; CHECK: G_STORE [[LOAD]](<8 x s16>), %ptr(p0) :: (store (s128), align 32)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[LOAD1]](<8 x s16>), [[PTR_ADD1]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[LOAD1]](<8 x s16>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<16 x s16>) = G_LOAD %ptr(p0) :: (load 32)
G_STORE %val(<16 x s16>), %ptr(p0) :: (store 32)
%val:_(<16 x s16>) = G_LOAD %ptr(p0) :: (load (<16 x s16>))
G_STORE %val(<16 x s16>), %ptr(p0) :: (store (<16 x s16>))
RET_ReallyLR
...
---
@@ -455,17 +455,17 @@ body: |
; CHECK-LABEL: name: load_8xs32
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load 16, align 32)
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from unknown-address + 16)
; CHECK: G_STORE [[LOAD]](<4 x s32>), %ptr(p0) :: (store 16, align 32)
; CHECK: [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
; CHECK: G_STORE [[LOAD]](<4 x s32>), %ptr(p0) :: (store (s128), align 32)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[LOAD1]](<4 x s32>), [[PTR_ADD1]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[LOAD1]](<4 x s32>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<8 x s32>) = G_LOAD %ptr(p0) :: (load 32)
G_STORE %val(<8 x s32>), %ptr(p0) :: (store 32)
%val:_(<8 x s32>) = G_LOAD %ptr(p0) :: (load (<8 x s32>))
G_STORE %val(<8 x s32>), %ptr(p0) :: (store (<8 x s32>))
RET_ReallyLR
...
---
@@ -479,17 +479,17 @@ body: |
; CHECK-LABEL: name: load_4xs64
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load 16, align 32)
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr(p0) :: (load (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load 16 from unknown-address + 16)
; CHECK: G_STORE [[LOAD]](<2 x s64>), %ptr(p0) :: (store 16, align 32)
; CHECK: [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (s128) from unknown-address + 16)
; CHECK: G_STORE [[LOAD]](<2 x s64>), %ptr(p0) :: (store (s128), align 32)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[LOAD1]](<2 x s64>), [[PTR_ADD1]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[LOAD1]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%val:_(<4 x s64>) = G_LOAD %ptr(p0) :: (load 32)
G_STORE %val(<4 x s64>), %ptr(p0) :: (store 32)
%val:_(<4 x s64>) = G_LOAD %ptr(p0) :: (load (<4 x s64>))
G_STORE %val(<4 x s64>), %ptr(p0) :: (store (<4 x s64>))
RET_ReallyLR
...
---
@@ -502,20 +502,20 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK: %val64:_(s64) = COPY $x2
; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 1)
; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store 2)
; CHECK: G_STORE %val64(s64), [[COPY]](p0) :: (store 1)
; CHECK: G_STORE %val64(s64), [[COPY]](p0) :: (store 2)
; CHECK: G_STORE %val64(s64), [[COPY]](p0) :: (store 4)
; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store (s8))
; CHECK: G_STORE [[COPY1]](s32), [[COPY]](p0) :: (store (s16))
; CHECK: G_STORE %val64(s64), [[COPY]](p0) :: (store (s8))
; CHECK: G_STORE %val64(s64), [[COPY]](p0) :: (store (s16))
; CHECK: G_STORE %val64(s64), [[COPY]](p0) :: (store (s32))
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s8) = G_TRUNC %1(s32)
%val64:_(s64) = COPY $x2
G_STORE %1(s32), %0(p0) :: (store 1)
G_STORE %1(s32), %0(p0) :: (store 2)
G_STORE %val64(s64), %0(p0) :: (store 1)
G_STORE %val64(s64), %0(p0) :: (store 2)
G_STORE %val64(s64), %0(p0) :: (store 4)
G_STORE %1(s32), %0(p0) :: (store (s8))
G_STORE %1(s32), %0(p0) :: (store (s16))
G_STORE %val64(s64), %0(p0) :: (store (s8))
G_STORE %val64(s64), %0(p0) :: (store (s16))
G_STORE %val64(s64), %0(p0) :: (store (s32))
...
---
name: store_6xs64
@@ -527,16 +527,16 @@ body: |
; CHECK: liveins: $x0
; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store 16)
; CHECK: G_STORE [[DEF]](<2 x s64>), %ptr(p0) :: (store (s128))
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C]](s64)
; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr, [[C1]](s64)
; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD1]](p0) :: (store 16 into unknown-address + 32)
; CHECK: G_STORE [[DEF]](<2 x s64>), [[PTR_ADD1]](p0) :: (store (s128) into unknown-address + 32)
; CHECK: RET_ReallyLR
%val:_(<6 x s64>) = G_IMPLICIT_DEF
%ptr:_(p0) = COPY $x0
G_STORE %val(<6 x s64>), %ptr(p0) :: (store 48, align 16)
G_STORE %val(<6 x s64>), %ptr(p0) :: (store (<6 x s64>), align 16)
RET_ReallyLR
...

@@ -11,11 +11,11 @@ body: |
; CHECK-LABEL: name: test_load_trunc
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (load 2)
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (load (s10))
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[LOAD]](s16)
; CHECK: RET_ReallyLR implicit [[TRUNC]](s1)
%0:_(p0) = G_FRAME_INDEX %stack.0
%1:_(s10) = G_LOAD %0(p0) :: (load 2)
%1:_(s10) = G_LOAD %0(p0) :: (load (s10))
%2:_(s1) = G_TRUNC %1(s10)
RET_ReallyLR implicit %2(s1)
...

@@ -54,7 +54,7 @@ body: |
%2:_(s32) = COPY $w2
%3:_(s64) = G_ZEXT %2(s32), debug-location !11
%4:_(s8) = G_TRUNC %1(s32), debug-location !11
G_MEMSET %0(p0), %4(s8), %3(s64), 0, debug-location !11 :: (store 1 into %ir.ptr)
G_MEMSET %0(p0), %4(s8), %3(s64), 0, debug-location !11 :: (store (s8) into %ir.ptr)
RET_ReallyLR debug-location !12
...

@@ -120,15 +120,15 @@ body: |
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1
; CHECK: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.3
; CHECK: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store 8)
; CHECK: G_STORE [[COPY1]](s64), [[FRAME_INDEX1]](p0) :: (store 8)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load 8)
; CHECK: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
; CHECK: G_STORE [[COPY1]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (dereferenceable load (s64))
; CHECK: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s64))
; CHECK: [[UMULH:%[0-9]+]]:_(s64) = G_UMULH [[LOAD]], [[LOAD1]]
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[LOAD1]]
; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UMULH]](s64), [[C]]
; CHECK: G_STORE [[C]](s64), [[FRAME_INDEX2]](p0) :: (store 8, align 1)
; CHECK: G_STORE [[C]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 1)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ICMP]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C1]]
@@ -142,13 +142,13 @@ body: |
%3:_(p0) = G_FRAME_INDEX %stack.0
%4:_(p0) = G_FRAME_INDEX %stack.1
%6:_(p0) = G_FRAME_INDEX %stack.3
G_STORE %2(s64), %3(p0) :: (store 8)
G_STORE %1(s64), %4(p0) :: (store 8)
%7:_(s64) = G_LOAD %3(p0) :: (dereferenceable load 8)
%8:_(s64) = G_LOAD %4(p0) :: (dereferenceable load 8)
G_STORE %2(s64), %3(p0) :: (store (s64))
G_STORE %1(s64), %4(p0) :: (store (s64))
%7:_(s64) = G_LOAD %3(p0) :: (dereferenceable load (s64))
%8:_(s64) = G_LOAD %4(p0) :: (dereferenceable load (s64))
%9:_(s64), %10:_(s1) = G_UMULO %7, %8
%31:_(s64) = G_CONSTANT i64 0
G_STORE %31(s64), %6(p0) :: (store 8, align 1)
G_STORE %31(s64), %6(p0) :: (store (s64), align 1)
%16:_(s64) = G_ZEXT %10(s1)
$x0 = COPY %9(s64)
$x1 = COPY %16(s64)

@@ -24,25 +24,25 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load 2 from %ir.ptr, align 4)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s16) from %ir.ptr, align 4)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s64)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load 1 from %ir.ptr + 2, align 2, basealign 4)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s8) from %ir.ptr + 2, align 2, basealign 4)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[LOAD]], [[C2]](s64)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[SHL]], [[ZEXTLOAD]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY2]], [[C2]](s64)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s64)
; CHECK: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store 2 into %ir.ptr2, align 4)
; CHECK: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store 1 into %ir.ptr2 + 2, align 2, basealign 4)
; CHECK: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store (s16) into %ir.ptr2, align 4)
; CHECK: G_STORE [[LSHR]](s32), [[PTR_ADD1]](p0) :: (store (s8) into %ir.ptr2 + 2, align 2, basealign 4)
; CHECK: $w0 = COPY [[C]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%3:_(s32) = G_CONSTANT i32 0
%2:_(s24) = G_LOAD %0(p0) :: (load 3 from %ir.ptr, align 4)
G_STORE %2(s24), %1(p0) :: (store 3 into %ir.ptr2, align 4)
%2:_(s24) = G_LOAD %0(p0) :: (load (s24) from %ir.ptr, align 4)
G_STORE %2(s24), %1(p0) :: (store (s24) into %ir.ptr2, align 4)
$w0 = COPY %3(s32)
RET_ReallyLR implicit $w0

@@ -43,28 +43,30 @@ body: |
; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[C]](s64)
; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
; CHECK: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; CHECK: [[DEF1:%[0-9]+]]:_(s1) = G_IMPLICIT_DEF
; CHECK: [[DEF2:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; CHECK: bb.1.bb1:
; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
; CHECK: [[PHI:%[0-9]+]]:_(p0) = G_PHI %6(p0), %bb.2, [[DEF]](p0), %bb.0
; CHECK: [[PHI1:%[0-9]+]]:_(s16) = G_PHI %20(s16), %bb.2, [[DEF1]](s16), %bb.0
; CHECK: [[PHI1:%[0-9]+]]:_(s16) = G_PHI %20(s16), %bb.2, [[DEF2]](s16), %bb.0
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[PHI1]](s16)
; CHECK: G_BRCOND [[TRUNC]](s1), %bb.3
; CHECK: bb.2.bb3:
; CHECK: successors: %bb.3(0x40000000), %bb.1(0x40000000)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[C1]](s64)
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[PHI]](p0) :: (load 2 from %ir.lsr.iv)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[C2]](s64)
; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[PHI]](p0) :: (load (s16) from %ir.lsr.iv)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ZEXT]](s32), [[C2]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ZEXT]](s32), [[C3]]
; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP]](p0) :: (load 2 from %ir.tmp5)
; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from %ir.tmp5)
; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C3]](s32)
; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ZEXT1]](s32), [[COPY]]
; CHECK: [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PHI]], [[C1]](s64)
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PHI]], [[C2]](s64)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ICMP1]](s32)
; CHECK: G_BRCOND [[TRUNC1]](s1), %bb.3
; CHECK: G_BR %bb.1
@@ -85,9 +87,9 @@ body: |
bb.3.bb3:
%4:_(s64) = G_CONSTANT i64 4
%5:_(p0) = G_PTR_ADD %2, %4(s64)
%6:_(s16) = G_LOAD %0(p0) :: (load 2 from %ir.lsr.iv)
%6:_(s16) = G_LOAD %0(p0) :: (load (s16) from %ir.lsr.iv)
%8:_(s1) = G_ICMP intpred(eq), %6(s16), %7
%9:_(s16) = G_LOAD %5(p0) :: (load 2 from %ir.tmp5)
%9:_(s16) = G_LOAD %5(p0) :: (load (s16) from %ir.tmp5)
%10:_(s1) = G_ICMP intpred(eq), %9(s16), %7
%11:_(p0) = G_PTR_ADD %0, %4(s64)
G_BRCOND %8(s1), %bb.4

@@ -577,8 +577,8 @@ body: |
; CHECK: [[PHI1:%[0-9]+]]:_(s64) = G_PHI [[DEF]](s64), %bb.0
; CHECK: [[PHI2:%[0-9]+]]:_(s64) = G_PHI [[COPY]](s64), %bb.0
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[PHI]](s64), [[PHI1]](s64)
; CHECK: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store 16)
; CHECK: G_STORE [[PHI2]](s64), [[COPY1]](p0) :: (store 8)
; CHECK: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store (s128))
; CHECK: G_STORE [[PHI2]](s64), [[COPY1]](p0) :: (store (s64))
; CHECK: RET_ReallyLR
; Check that the G_MERGE here gets inserted after all the PHIs.
bb.0:
@@ -593,8 +593,8 @@ body: |
bb.1:
%3:_(s128) = G_PHI %2(s128), %bb.0
%4:_(s64) = G_PHI %0(s64), %bb.0
G_STORE %3(s128), %1(p0) :: (store 16)
G_STORE %4(s64), %1(p0) :: (store 8)
G_STORE %3(s128), %1(p0) :: (store (s128))
G_STORE %4(s64), %1(p0) :: (store (s64))
RET_ReallyLR
...

@@ -10,13 +10,13 @@ body: |
; CHECK-LABEL: name: add_v16s8
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<16 x s8>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s8) = G_VECREDUCE_ADD [[LOAD]](<16 x s8>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[VECREDUCE_ADD]](s8)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(<16 x s8>) = G_LOAD %0(p0) :: (load 16)
%1:_(<16 x s8>) = G_LOAD %0(p0) :: (load (<16 x s8>))
%2:_(s8) = G_VECREDUCE_ADD %1(<16 x s8>)
%3:_(s32) = G_ANYEXT %2(s8)
$w0 = COPY %3(s32)
@@ -33,13 +33,13 @@ body: |
; CHECK-LABEL: name: add_v8s16
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<8 x s16>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s16) = G_VECREDUCE_ADD [[LOAD]](<8 x s16>)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[VECREDUCE_ADD]](s16)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(<8 x s16>) = G_LOAD %0(p0) :: (load 16)
%1:_(<8 x s16>) = G_LOAD %0(p0) :: (load (<8 x s16>))
%2:_(s16) = G_VECREDUCE_ADD %1(<8 x s16>)
%3:_(s32) = G_ANYEXT %2(s16)
$w0 = COPY %3(s32)
@@ -56,12 +56,12 @@ body: |
; CHECK-LABEL: name: add_v4s32
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<4 x s32>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s32) = G_VECREDUCE_ADD [[LOAD]](<4 x s32>)
; CHECK: $w0 = COPY [[VECREDUCE_ADD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(<4 x s32>) = G_LOAD %0(p0) :: (load 16)
%1:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>))
%2:_(s32) = G_VECREDUCE_ADD %1(<4 x s32>)
$w0 = COPY %2(s32)
RET_ReallyLR implicit $w0
@@ -77,12 +77,12 @@ body: |
; CHECK-LABEL: name: add_v2s64
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<2 x s64>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s64) = G_VECREDUCE_ADD [[LOAD]](<2 x s64>)
; CHECK: $x0 = COPY [[VECREDUCE_ADD]](s64)
; CHECK: RET_ReallyLR implicit $x0
%0:_(p0) = COPY $x0
%1:_(<2 x s64>) = G_LOAD %0(p0) :: (load 16)
%1:_(<2 x s64>) = G_LOAD %0(p0) :: (load (<2 x s64>))
%2:_(s64) = G_VECREDUCE_ADD %1(<2 x s64>)
$x0 = COPY %2(s64)
RET_ReallyLR implicit $x0
@@ -98,12 +98,12 @@ body: |
; CHECK-LABEL: name: add_v2s32
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<2 x s32>))
; CHECK: [[VECREDUCE_ADD:%[0-9]+]]:_(s32) = G_VECREDUCE_ADD [[LOAD]](<2 x s32>)
; CHECK: $w0 = COPY [[VECREDUCE_ADD]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(<2 x s32>) = G_LOAD %0(p0) :: (load 8)
%1:_(<2 x s32>) = G_LOAD %0(p0) :: (load (<2 x s32>))
%2:_(s32) = G_VECREDUCE_ADD %1(<2 x s32>)
$w0 = COPY %2(s32)
RET_ReallyLR implicit $w0

@@ -25,8 +25,8 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.v1ptr)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.v2ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128) from %ir.v1ptr)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.v2ptr)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](s128)
; CHECK: $x0 = COPY [[UV]](s64)
@@ -39,14 +39,14 @@ body: |
; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16 into %ir.v1ptr)
; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128) into %ir.v1ptr)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s128) = G_LOAD %0(p0) :: (load 16 from %ir.v1ptr)
%3:_(s128) = G_LOAD %1(p0) :: (load 16 from %ir.v2ptr)
%2:_(s128) = G_LOAD %0(p0) :: (load (s128) from %ir.v1ptr)
%3:_(s128) = G_LOAD %1(p0) :: (load (s128) from %ir.v2ptr)
%4:_(s128) = G_UDIV %2, %3
G_STORE %4(s128), %0(p0) :: (store 16 into %ir.v1ptr)
G_STORE %4(s128), %0(p0) :: (store (s128) into %ir.v1ptr)
RET_ReallyLR
...
@@ -66,8 +66,8 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.v1ptr)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load 16 from %ir.v2ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128) from %ir.v1ptr)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY1]](p0) :: (load (s128) from %ir.v2ptr)
; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](s128)
; CHECK: $x0 = COPY [[UV]](s64)
@@ -80,14 +80,14 @@ body: |
; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY2]](s64), [[COPY3]](s64)
; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store 16 into %ir.v1ptr)
; CHECK: G_STORE [[MV]](s128), [[COPY]](p0) :: (store (s128) into %ir.v1ptr)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s128) = G_LOAD %0(p0) :: (load 16 from %ir.v1ptr)
%3:_(s128) = G_LOAD %1(p0) :: (load 16 from %ir.v2ptr)
%2:_(s128) = G_LOAD %0(p0) :: (load (s128) from %ir.v1ptr)
%3:_(s128) = G_LOAD %1(p0) :: (load (s128) from %ir.v2ptr)
%4:_(s128) = G_SDIV %2, %3
G_STORE %4(s128), %0(p0) :: (store 16 into %ir.v1ptr)
G_STORE %4(s128), %0(p0) :: (store (s128) into %ir.v1ptr)
RET_ReallyLR
...

@@ -14,12 +14,12 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[C]](s64)
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[ASHR]](s64)
; CHECK: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store 16)
; CHECK: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store (s128))
; CHECK: RET_ReallyLR
%0:_(s64) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s128) = G_SEXT %0(s64)
G_STORE %2(s128), %1(p0) :: (store 16)
G_STORE %2(s128), %1(p0) :: (store (s128))
RET_ReallyLR
...
@@ -36,12 +36,12 @@ body: |
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[C]](s64)
; CHECK: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store 16)
; CHECK: G_STORE [[MV]](s128), [[COPY1]](p0) :: (store (s128))
; CHECK: RET_ReallyLR
%0:_(s64) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s128) = G_ZEXT %0(s64)
G_STORE %2(s128), %1(p0) :: (store 16)
G_STORE %2(s128), %1(p0) :: (store (s128))
RET_ReallyLR
...
@@ -60,12 +60,12 @@ body: |
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[MV]](s64), [[C1]](s64)
; CHECK: G_STORE [[MV1]](s128), [[COPY1]](p0) :: (store 16)
; CHECK: G_STORE [[MV1]](s128), [[COPY1]](p0) :: (store (s128))
; CHECK: RET_ReallyLR
%0:_(s32) = COPY $w0
%1:_(p0) = COPY $x1
%2:_(s128) = G_ZEXT %0(s32)
G_STORE %2(s128), %1(p0) :: (store 16)
G_STORE %2(s128), %1(p0) :: (store (s128))
RET_ReallyLR
...
@@ -81,17 +81,17 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8)
; CHECK: G_STORE [[C]](s64), [[COPY1]](p0) :: (store 8)
; CHECK: G_STORE [[C]](s64), [[COPY1]](p0) :: (store 8)
; CHECK: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store (s64))
; CHECK: G_STORE [[C]](s64), [[COPY1]](p0) :: (store (s64))
; CHECK: G_STORE [[C]](s64), [[COPY1]](p0) :: (store (s64))
; CHECK: RET_ReallyLR
%0:_(s64) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s192) = G_ZEXT %0(s64)
%3:_(s64), %4:_(s64), %5:_(s64) = G_UNMERGE_VALUES %2(s192)
G_STORE %3, %1(p0) :: (store 8)
G_STORE %4, %1(p0) :: (store 8)
G_STORE %5, %1(p0) :: (store 8)
G_STORE %3, %1(p0) :: (store (s64))
G_STORE %4, %1(p0) :: (store (s64))
G_STORE %5, %1(p0) :: (store (s64))
RET_ReallyLR
...

@@ -7,9 +7,9 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_sextload
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_SEXTLOAD %0 :: (load 1)
%1:_(s32) = G_SEXTLOAD %0 :: (load (s8))
$w0 = COPY %1
...

@@ -153,10 +153,10 @@ body: |
; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SHUF:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY1]](<2 x s64>), [[COPY2]], shufflemask(1, 2)
; CHECK: [[SHUF1:%[0-9]+]]:_(<2 x s64>) = G_SHUFFLE_VECTOR [[COPY3]](<2 x s64>), [[COPY]], shufflemask(1, 2)
; CHECK: G_STORE [[SHUF]](<2 x s64>), [[COPY4]](p0) :: (store 16, align 32)
; CHECK: G_STORE [[SHUF]](<2 x s64>), [[COPY4]](p0) :: (store (s128), align 32)
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C]](s64)
; CHECK: G_STORE [[SHUF1]](<2 x s64>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[SHUF1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%3:_(<2 x s64>) = COPY $q0
%4:_(<2 x s64>) = COPY $q1
@@ -166,7 +166,7 @@ body: |
%1:_(<4 x s64>) = G_CONCAT_VECTORS %5(<2 x s64>), %6(<2 x s64>)
%2:_(p0) = COPY $x0
%7:_(<4 x s64>) = G_SHUFFLE_VECTOR %0(<4 x s64>), %1, shufflemask(3, 4, 7, 0)
G_STORE %7(<4 x s64>), %2(p0) :: (store 32)
G_STORE %7(<4 x s64>), %2(p0) :: (store (<4 x s64>))
RET_ReallyLR
...
@@ -195,10 +195,10 @@ body: |
; CHECK: [[EVEC3:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY3]](<4 x s32>), [[C3]](s64)
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[EVEC]](s32), [[EVEC1]](s32), [[EVEC2]](s32), [[EVEC3]](s32)
; CHECK: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[COPY1]](<4 x s32>), [[COPY]], shufflemask(2, 6, 5, 3)
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](p0) :: (store 16, align 32)
; CHECK: G_STORE [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](p0) :: (store (s128), align 32)
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C4]](s64)
; CHECK: G_STORE [[SHUF]](<4 x s32>), [[PTR_ADD]](p0) :: (store 16 into unknown-address + 16)
; CHECK: G_STORE [[SHUF]](<4 x s32>), [[PTR_ADD]](p0) :: (store (s128) into unknown-address + 16)
; CHECK: RET_ReallyLR
%3:_(<4 x s32>) = COPY $q0
%4:_(<4 x s32>) = COPY $q1
@@ -208,7 +208,7 @@ body: |
%1:_(<8 x s32>) = G_CONCAT_VECTORS %5(<4 x s32>), %6(<4 x s32>)
%2:_(p0) = COPY $x0
%7:_(<8 x s32>) = G_SHUFFLE_VECTOR %0(<8 x s32>), %1, shufflemask(0, 5, 10, 15, 6, 2, 1, 7)
G_STORE %7(<8 x s32>), %2(p0) :: (store 32)
G_STORE %7(<8 x s32>), %2(p0) :: (store (<8 x s32>))
RET_ReallyLR
...

@@ -33,20 +33,20 @@ body: |
bb.0:
; CHECK-LABEL: name: test_vaarg
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (load 8)
; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (load (p0))
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD]], [[C]](s64), debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
; CHECK: G_STORE [[PTR_ADD]](p0), [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (store 8)
; CHECK: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 5, column: 1, scope: {{.*}}) :: (load 8)
; CHECK: G_STORE [[PTR_ADD]](p0), [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (store (p0))
; CHECK: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 5, column: 1, scope: {{.*}}) :: (load (p0))
; CHECK: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD1]], [[C]](s64), debug-location !DILocation(line: 5, column: 1, scope: {{.*}})
; CHECK: G_STORE [[PTR_ADD1]](p0), [[COPY]](p0), debug-location !DILocation(line: 5, column: 1, scope: {{.*}}) :: (store 8)
; CHECK: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (load 8)
; CHECK: G_STORE [[PTR_ADD1]](p0), [[COPY]](p0), debug-location !DILocation(line: 5, column: 1, scope: {{.*}}) :: (store (p0))
; CHECK: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (load (p0))
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; CHECK: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[LOAD2]], [[C1]](s64), debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
; CHECK: [[PTRMASK:%[0-9]+]]:_(p0) = G_PTRMASK [[PTR_ADD2]], [[C2]](s64), debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
; CHECK: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTRMASK]], [[C]](s64), debug-location !DILocation(line: 4, column: 3, scope: {{.*}})
; CHECK: G_STORE [[PTR_ADD3]](p0), [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (store 8)
; CHECK: G_STORE [[PTR_ADD3]](p0), [[COPY]](p0), debug-location !DILocation(line: 4, column: 3, scope: {{.*}}) :: (store (p0))
%0:_(p0) = COPY $x0
%1:_(s8) = G_VAARG %0(p0), 1, debug-location !11

@@ -7,9 +7,9 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_zextload
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[ZEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s32) = G_ZEXTLOAD %0 :: (load 1)
%1:_(s32) = G_ZEXTLOAD %0 :: (load (s8))
$w0 = COPY %1
...


@ -100,9 +100,9 @@ body: |
; CHECK-LABEL: name: unmerge_merge_combine
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8)
; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load (s64))
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load 16)
; CHECK: [[LOAD1:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128))
; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD1]](s128)
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[LOAD]], [[UV]]
; CHECK: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C]], [[UV]]
@ -114,8 +114,8 @@ body: |
; CHECK: $q0 = COPY [[MV]](s128)
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s128) = G_ZEXTLOAD %0:_(p0) :: (load 8)
%2:_(s128) = G_LOAD %0:_(p0) :: (load 16)
%1:_(s128) = G_ZEXTLOAD %0:_(p0) :: (load (s64))
%2:_(s128) = G_LOAD %0:_(p0) :: (load (s128))
%3:_(s128) = G_MUL %1:_, %2:_
$q0 = COPY %3
RET_ReallyLR


@ -42,13 +42,13 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK: $x0 = COPY [[LDRXroX]]
; CHECK: RET_ReallyLR implicit $x0
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
%4:gpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
$x0 = COPY %4(s64)
RET_ReallyLR implicit $x0
...
@ -67,13 +67,13 @@ body: |
; CHECK: liveins: $d0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK: $d0 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
%4:fpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
$d0 = COPY %4(s64)
RET_ReallyLR implicit $d0
...
@ -94,7 +94,7 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
; CHECK: $x0 = COPY [[ADDXrr1]]
@ -102,7 +102,7 @@ body: |
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s64) = G_LOAD %2(p0) :: (load 8 from %ir.addr)
%4:gpr(s64) = G_LOAD %2(p0) :: (load (s64) from %ir.addr)
%5:gpr(s64) = G_PTRTOINT %2
%6:gpr(s64) = G_ADD %5, %4
$x0 = COPY %6(s64)
@ -123,7 +123,7 @@ body: |
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: $x2 = COPY [[LDRXroX]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
@ -131,7 +131,7 @@ body: |
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
@ -150,7 +150,7 @@ body: |
; CHECK: liveins: $x0, $x1, $d2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
@ -158,7 +158,7 @@ body: |
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -177,7 +177,7 @@ body: |
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: $x2 = COPY [[LDRXroX]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
@ -185,7 +185,7 @@ body: |
%2:gpr(s64) = G_MUL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
@ -204,7 +204,7 @@ body: |
; CHECK: liveins: $x0, $x1, $d2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
@ -212,7 +212,7 @@ body: |
%2:gpr(s64) = G_MUL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -231,7 +231,7 @@ body: |
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: $x2 = COPY [[LDRXroX]]
; CHECK: RET_ReallyLR implicit $x2
%0:gpr(s64) = COPY $x0
@ -239,7 +239,7 @@ body: |
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
$x2 = COPY %5(s64)
RET_ReallyLR implicit $x2
@ -258,7 +258,7 @@ body: |
; CHECK: liveins: $x0, $x1, $d2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
@ -266,7 +266,7 @@ body: |
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -290,7 +290,7 @@ body: |
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
@ -298,7 +298,7 @@ body: |
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -322,7 +322,7 @@ body: |
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK: $d2 = COPY [[LDRDroX]]
; CHECK: RET_ReallyLR implicit $d2
%0:gpr(s64) = COPY $x0
@ -330,7 +330,7 @@ body: |
%2:gpr(s64) = G_MUL %1, %0(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:fpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
$d2 = COPY %5(s64)
RET_ReallyLR implicit $d2
@ -352,7 +352,7 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
; CHECK: $x2 = COPY [[ADDXrr]]
@ -362,7 +362,7 @@ body: |
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
$x2 = COPY %7(s64)
@ -387,7 +387,7 @@ body: |
; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
@ -399,7 +399,7 @@ body: |
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
%8:gpr(s64) = G_PTRTOINT %4
@ -424,8 +424,8 @@ body: |
; CHECK: liveins: $x0, $x1, $x2
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
; CHECK: $x2 = COPY [[ADDXrr]]
; CHECK: RET_ReallyLR implicit $x2
@ -434,8 +434,8 @@ body: |
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%7:gpr(s64) = G_ADD %5, %6
$x2 = COPY %7(s64)
RET_ReallyLR implicit $x2
@ -458,8 +458,8 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load 8 from %ir.addr)
; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
; CHECK: $x2 = COPY [[ADDXrr]]
; CHECK: RET_ReallyLR implicit $x2
@ -468,8 +468,8 @@ body: |
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%7:gpr(s64) = G_ADD %5, %6
$x2 = COPY %7(s64)
RET_ReallyLR implicit $x2
@ -494,7 +494,7 @@ body: |
; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr)
; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
@ -505,7 +505,7 @@ body: |
%2:gpr(s64) = G_SHL %0, %1(s64)
%3:gpr(p0) = COPY $x1
%4:gpr(p0) = G_PTR_ADD %3, %2
%5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr)
%5:gpr(s64) = G_LOAD %4(p0) :: (load (s64) from %ir.addr)
%6:gpr(s64) = G_ADD %2, %1
%7:gpr(s64) = G_ADD %5, %6
%8:gpr(s64) = G_PTRTOINT %4
@ -527,13 +527,13 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load 4 from %ir.addr)
; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
; CHECK: $w2 = COPY [[LDRWroX]]
; CHECK: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s32) = G_LOAD %2(p0) :: (load 4 from %ir.addr)
%4:gpr(s32) = G_LOAD %2(p0) :: (load (s32) from %ir.addr)
$w2 = COPY %4(s32)
RET_ReallyLR implicit $w2
...
@ -551,13 +551,13 @@ body: |
; CHECK: liveins: $d0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load 4 from %ir.addr)
; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
; CHECK: $s2 = COPY [[LDRSroX]]
; CHECK: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s32) = G_LOAD %2(p0) :: (load 4 from %ir.addr)
%4:fpr(s32) = G_LOAD %2(p0) :: (load (s32) from %ir.addr)
$s2 = COPY %4(s32)
RET_ReallyLR implicit $h2
...
@ -575,13 +575,13 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load 2 from %ir.addr)
; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
; CHECK: $h2 = COPY [[LDRHroX]]
; CHECK: RET_ReallyLR implicit $h2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(s16) = G_LOAD %2(p0) :: (load 2 from %ir.addr)
%4:fpr(s16) = G_LOAD %2(p0) :: (load (s16) from %ir.addr)
$h2 = COPY %4(s16)
RET_ReallyLR implicit $h2
...
@ -599,13 +599,13 @@ body: |
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load 1 from %ir.addr)
; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
; CHECK: $w2 = COPY [[LDRBBroX]]
; CHECK: RET_ReallyLR implicit $w2
%0:gpr(p0) = COPY $x0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:gpr(s32) = G_LOAD %2(p0) :: (load 1 from %ir.addr)
%4:gpr(s32) = G_LOAD %2(p0) :: (load (s8) from %ir.addr)
$w2 = COPY %4(s32)
RET_ReallyLR implicit $w2
...
@ -623,12 +623,12 @@ body: |
; CHECK: liveins: $d0, $x1
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load 16 from %ir.addr)
; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
; CHECK: $q0 = COPY [[LDRQroX]]
; CHECK: RET_ReallyLR implicit $q0
%0:gpr(p0) = COPY $d0
%1:gpr(s64) = COPY $x1
%2:gpr(p0) = G_PTR_ADD %0, %1
%4:fpr(<2 x s64>) = G_LOAD %2(p0) :: (load 16 from %ir.addr)
%4:fpr(<2 x s64>) = G_LOAD %2(p0) :: (load (<2 x s64>) from %ir.addr)
$q0 = COPY %4(<2 x s64>)
RET_ReallyLR implicit $q0


@ -22,7 +22,7 @@ body: |
; CHECK: liveins: $w1, $x0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 1, 1 :: (load 4)
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 1, 1 :: (load (s32))
; CHECK: $w0 = COPY %load
; CHECK: RET_ReallyLR implicit $w0
%base:gpr(p0) = COPY $x0
@ -31,7 +31,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 2
%offset:gpr(s64) = G_SHL %ext, %c
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load 4)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
$w0 = COPY %load(s32)
RET_ReallyLR implicit $w0
...
@ -56,7 +56,7 @@ body: |
; CHECK: liveins: $w1, $x0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load 4)
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
; CHECK: $w0 = COPY %load
; CHECK: RET_ReallyLR implicit $w0
%base:gpr(p0) = COPY $x0
@ -65,7 +65,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 2
%offset:gpr(s64) = G_SHL %ext, %c
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load 4)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
$w0 = COPY %load(s32)
RET_ReallyLR implicit $w0
...
@ -90,7 +90,7 @@ body: |
; CHECK: liveins: $w1, $x0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load 4)
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
; CHECK: $w0 = COPY %load
; CHECK: RET_ReallyLR implicit $w0
%base:gpr(p0) = COPY $x0
@ -99,7 +99,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 2
%offset:gpr(s64) = G_SHL %ext, %c
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load 4)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
$w0 = COPY %load(s32)
RET_ReallyLR implicit $w0
...
@ -122,7 +122,7 @@ body: |
; CHECK: liveins: $w1, $x0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 1, 1 :: (load 4)
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 1, 1 :: (load (s32))
; CHECK: $w0 = COPY %load
; CHECK: RET_ReallyLR implicit $w0
%base:gpr(p0) = COPY $x0
@ -131,7 +131,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 4
%offset:gpr(s64) = G_MUL %c, %ext
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load 4)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
$w0 = COPY %load(s32)
RET_ReallyLR implicit $w0
...
@ -154,7 +154,7 @@ body: |
; CHECK: liveins: $w1, $x0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load 4)
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
; CHECK: $w0 = COPY %load
; CHECK: RET_ReallyLR implicit $w0
%base:gpr(p0) = COPY $x0
@ -163,7 +163,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 4
%offset:gpr(s64) = G_MUL %c, %ext
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load 4)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
$w0 = COPY %load(s32)
RET_ReallyLR implicit $w0
...
@ -186,7 +186,7 @@ body: |
; CHECK: liveins: $w1, $x0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load 4)
; CHECK: %load:gpr32 = LDRWroW %base, %foo, 0, 1 :: (load (s32))
; CHECK: $w0 = COPY %load
; CHECK: RET_ReallyLR implicit $w0
%base:gpr(p0) = COPY $x0
@ -195,7 +195,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 4
%offset:gpr(s64) = G_MUL %c, %ext
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load 4)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load (s32))
$w0 = COPY %load(s32)
RET_ReallyLR implicit $w0
...
@ -218,7 +218,7 @@ body: |
; CHECK: liveins: $w1, $x0, $d0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:fpr64 = LDRDroW %base, %foo, 1, 1 :: (load 8)
; CHECK: %load:fpr64 = LDRDroW %base, %foo, 1, 1 :: (load (<2 x s32>))
; CHECK: $x0 = COPY %load
; CHECK: RET_ReallyLR implicit $x0
%base:gpr(p0) = COPY $x0
@ -227,7 +227,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 8
%offset:gpr(s64) = G_MUL %c, %ext
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:fpr(<2 x s32>) = G_LOAD %ptr(p0) :: (load 8)
%load:fpr(<2 x s32>) = G_LOAD %ptr(p0) :: (load (<2 x s32>))
$x0 = COPY %load(<2 x s32>)
RET_ReallyLR implicit $x0
...
@ -250,7 +250,7 @@ body: |
; CHECK: liveins: $w1, $x0, $d0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:gpr64 = LDRXroW %base, %foo, 1, 1 :: (load 8)
; CHECK: %load:gpr64 = LDRXroW %base, %foo, 1, 1 :: (load (s64))
; CHECK: $x0 = COPY %load
; CHECK: RET_ReallyLR implicit $x0
%base:gpr(p0) = COPY $x0
@ -259,7 +259,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 8
%offset:gpr(s64) = G_MUL %c, %ext
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:gpr(s64) = G_LOAD %ptr(p0) :: (load 8)
%load:gpr(s64) = G_LOAD %ptr(p0) :: (load (s64))
$x0 = COPY %load(s64)
RET_ReallyLR implicit $x0
...
@ -283,14 +283,14 @@ body: |
; CHECK: liveins: $x0, $w0, $w1
; CHECK: %val:gpr32 = COPY $w1
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %load:gpr32 = LDRBBroW %base, %val, 1, 0 :: (load 1)
; CHECK: %load:gpr32 = LDRBBroW %base, %val, 1, 0 :: (load (s8))
; CHECK: $w0 = COPY %load
; CHECK: RET_ReallyLR implicit $w0
%val:gpr(s32) = COPY $w1
%base:gpr(p0) = COPY $x0
%ext:gpr(s64) = G_SEXT %val(s32)
%ptr:gpr(p0) = G_PTR_ADD %base, %ext(s64)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load 1)
%load:gpr(s32) = G_LOAD %ptr(p0) :: (load (s8))
$w0 = COPY %load(s32)
RET_ReallyLR implicit $w0
...
@ -313,7 +313,7 @@ body: |
; CHECK: liveins: $w1, $x0
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %foo:gpr32 = COPY $w1
; CHECK: %load:fpr16 = LDRHroW %base, %foo, 1, 1 :: (load 2)
; CHECK: %load:fpr16 = LDRHroW %base, %foo, 1, 1 :: (load (s16))
; CHECK: $h0 = COPY %load
; CHECK: RET_ReallyLR implicit $h0
%base:gpr(p0) = COPY $x0
@ -322,7 +322,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 2
%offset:gpr(s64) = G_MUL %c, %ext
%ptr:gpr(p0) = G_PTR_ADD %base, %offset(s64)
%load:fpr(s16) = G_LOAD %ptr(p0) :: (load 2)
%load:fpr(s16) = G_LOAD %ptr(p0) :: (load (s16))
$h0 = COPY %load(s16)
RET_ReallyLR implicit $h0
...
@ -346,7 +346,7 @@ body: |
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %imp:gpr64 = IMPLICIT_DEF
; CHECK: %and:gpr64common = ANDXri %imp, 4103
; CHECK: %load:gpr64 = LDRXroX %base, %and, 0, 1 :: (load 8)
; CHECK: %load:gpr64 = LDRXroX %base, %and, 0, 1 :: (load (s64))
; CHECK: $x1 = COPY %load
; CHECK: RET_ReallyLR implicit $x1
%base:gpr(p0) = COPY $x0
@ -356,7 +356,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 8
%mul:gpr(s64) = G_MUL %c, %and
%ptr:gpr(p0) = G_PTR_ADD %base, %mul(s64)
%load:gpr(s64) = G_LOAD %ptr(p0) :: (load 8)
%load:gpr(s64) = G_LOAD %ptr(p0) :: (load (s64))
$x1 = COPY %load(s64)
RET_ReallyLR implicit $x1
...
@ -380,7 +380,7 @@ body: |
; CHECK: %base:gpr64sp = COPY $x0
; CHECK: %imp:gpr64 = IMPLICIT_DEF
; CHECK: %and:gpr64common = ANDXri %imp, 4111
; CHECK: %load:gpr64 = LDRXroX %base, %and, 0, 1 :: (load 8)
; CHECK: %load:gpr64 = LDRXroX %base, %and, 0, 1 :: (load (s64))
; CHECK: $x1 = COPY %load
; CHECK: RET_ReallyLR implicit $x1
%base:gpr(p0) = COPY $x0
@ -390,7 +390,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 8
%mul:gpr(s64) = G_MUL %c, %and
%ptr:gpr(p0) = G_PTR_ADD %base, %mul(s64)
%load:gpr(s64) = G_LOAD %ptr(p0) :: (load 8)
%load:gpr(s64) = G_LOAD %ptr(p0) :: (load (s64))
$x1 = COPY %load(s64)
RET_ReallyLR implicit $x1
...
@ -414,7 +414,7 @@ body: |
; CHECK: %imp:gpr64 = IMPLICIT_DEF
; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY %imp.sub_32
; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
; CHECK: %load:gpr64 = LDRXroW %base, [[COPY1]], 0, 1 :: (load 8)
; CHECK: %load:gpr64 = LDRXroW %base, [[COPY1]], 0, 1 :: (load (s64))
; CHECK: $x1 = COPY %load
; CHECK: RET_ReallyLR implicit $x1
%base:gpr(p0) = COPY $x0
@ -424,7 +424,7 @@ body: |
%c:gpr(s64) = G_CONSTANT i64 8
%mul:gpr(s64) = G_MUL %c, %and
%ptr:gpr(p0) = G_PTR_ADD %base, %mul(s64)
%load:gpr(s64) = G_LOAD %ptr(p0) :: (load 8)
%load:gpr(s64) = G_LOAD %ptr(p0) :: (load (s64))
$x1 = COPY %load(s64)
RET_ReallyLR implicit $x1
...
@ -448,7 +448,7 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
; CHECK: [[ANDWri:%[0-9]+]]:gpr32common = ANDWri [[COPY]], 7
; CHECK: [[LDRWroW:%[0-9]+]]:gpr32 = LDRWroW [[COPY1]], [[ANDWri]], 0, 1 :: (load 4)
; CHECK: [[LDRWroW:%[0-9]+]]:gpr32 = LDRWroW [[COPY1]], [[ANDWri]], 0, 1 :: (load (s32))
; CHECK: $w0 = COPY [[LDRWroW]]
; CHECK: RET_ReallyLR implicit $w0
%0:gpr(s32) = COPY $w0
@ -459,7 +459,7 @@ body: |
%12:gpr(s32) = G_SHL %3, %13(s64)
%6:gpr(s64) = G_ZEXT %12(s32)
%7:gpr(p0) = G_PTR_ADD %1, %6(s64)
%9:gpr(s32) = G_LOAD %7(p0) :: (load 4)
%9:gpr(s32) = G_LOAD %7(p0) :: (load (s32))
$w0 = COPY %9(s32)
RET_ReallyLR implicit $w0


@ -24,21 +24,22 @@ define i32 @foo() {
; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
; CHECK: [[GV2:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var3
; CHECK: [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load 4 from @var1)
; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load (s32) from @var1)
; CHECK: [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[LOAD]](s32), [[C3]]
; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
; CHECK: G_BRCOND [[TRUNC]](s1), %bb.3
; CHECK: G_BR %bb.2
; CHECK: bb.2.if.then:
; CHECK: successors: %bb.3(0x80000000)
; CHECK: [[GV3:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var2
; CHECK: [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
; CHECK: G_STORE [[C4]](s32), [[GV3]](p0) :: (store 4 into @var2)
; CHECK: G_STORE [[C4]](s32), [[GV3]](p0) :: (store (s32) into @var2)
; CHECK: [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
; CHECK: G_STORE [[C5]](s32), [[GV]](p0) :: (store 4 into @var1)
; CHECK: G_STORE [[C5]](s32), [[GV]](p0) :: (store (s32) into @var1)
; CHECK: [[GV4:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var3
; CHECK: G_STORE [[C4]](s32), [[GV4]](p0) :: (store 4 into @var3)
; CHECK: G_STORE [[C5]](s32), [[GV]](p0) :: (store 4 into @var1)
; CHECK: G_STORE [[C4]](s32), [[GV4]](p0) :: (store (s32) into @var3)
; CHECK: G_STORE [[C5]](s32), [[GV]](p0) :: (store (s32) into @var1)
; CHECK: bb.3.if.end:
; CHECK: [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; CHECK: $w0 = COPY [[C6]](s32)
@ -74,16 +75,17 @@ define i32 @darwin_tls() {
; CHECK: [[GV1:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var2
; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; CHECK: [[GV2:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var1
; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV2]](p0) :: (dereferenceable load 4 from @var1)
; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV2]](p0) :: (dereferenceable load (s32) from @var1)
; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[LOAD]](s32), [[C1]]
; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
; CHECK: G_BRCOND [[TRUNC]](s1), %bb.3
; CHECK: G_BR %bb.2
; CHECK: bb.2.if.then:
; CHECK: successors: %bb.3(0x80000000)
; CHECK: [[LOAD1:%[0-9]+]]:gpr(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load 4 from @tls_gv)
; CHECK: [[LOAD1:%[0-9]+]]:gpr(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load (s32) from @tls_gv)
; CHECK: [[GV3:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var2
; CHECK: G_STORE [[LOAD1]](s32), [[GV3]](p0) :: (store 4 into @var2)
; CHECK: G_STORE [[LOAD1]](s32), [[GV3]](p0) :: (store (s32) into @var2)
; CHECK: bb.3.if.end:
; CHECK: [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; CHECK: $w0 = COPY [[C2]](s32)


@ -358,7 +358,7 @@ body: |
; CHECK: [[GV1:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var3
; CHECK: [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; CHECK: [[GV2:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var1
; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV2]](p0) :: (load 4 from @var1)
; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV2]](p0) :: (load (s32) from @var1)
; CHECK: [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(eq), [[LOAD]](s32), [[C3]]
; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
@ -368,13 +368,13 @@ body: |
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[GV3:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var2
; CHECK: [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
; CHECK: G_STORE [[C4]](s32), [[GV3]](p0) :: (store 4 into @var2)
; CHECK: G_STORE [[C4]](s32), [[GV3]](p0) :: (store (s32) into @var2)
; CHECK: [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
; CHECK: [[GV4:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var1
; CHECK: G_STORE [[C5]](s32), [[GV4]](p0) :: (store 4 into @var1)
; CHECK: G_STORE [[C5]](s32), [[GV4]](p0) :: (store (s32) into @var1)
; CHECK: [[GV5:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @var3
; CHECK: G_STORE [[C4]](s32), [[GV5]](p0) :: (store 4 into @var3)
; CHECK: G_STORE [[C5]](s32), [[GV4]](p0) :: (store 4 into @var1)
; CHECK: G_STORE [[C4]](s32), [[GV5]](p0) :: (store (s32) into @var3)
; CHECK: G_STORE [[C5]](s32), [[GV4]](p0) :: (store (s32) into @var1)
; CHECK: bb.2.if.end:
; CHECK: [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; CHECK: $w0 = COPY [[C6]](s32)
@ -390,17 +390,17 @@ body: |
%6:gpr(s32) = G_CONSTANT i32 3
%7:gpr(p0) = G_GLOBAL_VALUE @var3
%8:gpr(s32) = G_CONSTANT i32 0
%0:gpr(s32) = G_LOAD %1(p0) :: (load 4 from @var1)
%0:gpr(s32) = G_LOAD %1(p0) :: (load (s32) from @var1)
%9:gpr(s32) = G_ICMP intpred(eq), %0(s32), %2
%3:gpr(s1) = G_TRUNC %9(s32)
G_BRCOND %3(s1), %bb.2
G_BR %bb.3
bb.2.if.then:
G_STORE %4(s32), %5(p0) :: (store 4 into @var2)
G_STORE %6(s32), %1(p0) :: (store 4 into @var1)
G_STORE %4(s32), %7(p0) :: (store 4 into @var3)
G_STORE %6(s32), %1(p0) :: (store 4 into @var1)
G_STORE %4(s32), %5(p0) :: (store (s32) into @var2)
G_STORE %6(s32), %1(p0) :: (store (s32) into @var1)
G_STORE %4(s32), %7(p0) :: (store (s32) into @var3)
G_STORE %6(s32), %1(p0) :: (store (s32) into @var1)
bb.3.if.end:
$w0 = COPY %8(s32)
@ -425,7 +425,7 @@ body: |
; CHECK: [[ADRP2:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var3
; CHECK: %addlow3:gpr(p0) = G_ADD_LOW [[ADRP2]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var3
; CHECK: [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[ADRP]](p0) :: (load 4 from @var1)
; CHECK: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[ADRP]](p0) :: (load (s32) from @var1)
; CHECK: [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
; CHECK: [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(eq), [[LOAD]](s32), [[C3]]
; CHECK: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
@ -436,15 +436,15 @@ body: |
; CHECK: [[ADRP3:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var2
; CHECK: [[ADD_LOW:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP3]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var2
; CHECK: [[C4:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 2
; CHECK: G_STORE [[C4]](s32), [[ADD_LOW]](p0) :: (store 4 into @var2)
; CHECK: G_STORE [[C4]](s32), [[ADD_LOW]](p0) :: (store (s32) into @var2)
; CHECK: [[C5:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 3
; CHECK: [[ADRP4:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var1
; CHECK: [[ADD_LOW1:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP4]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var1
; CHECK: G_STORE [[C5]](s32), [[ADD_LOW1]](p0) :: (store 4 into @var1)
; CHECK: G_STORE [[C5]](s32), [[ADD_LOW1]](p0) :: (store (s32) into @var1)
; CHECK: [[ADRP5:%[0-9]+]]:gpr64(p0) = ADRP target-flags(aarch64-page) @var3
; CHECK: [[ADD_LOW2:%[0-9]+]]:gpr(p0) = G_ADD_LOW [[ADRP5]](p0), target-flags(aarch64-pageoff, aarch64-nc) @var3
; CHECK: G_STORE [[C4]](s32), [[ADD_LOW2]](p0) :: (store 4 into @var3)
; CHECK: G_STORE [[C5]](s32), [[ADD_LOW1]](p0) :: (store 4 into @var1)
; CHECK: G_STORE [[C4]](s32), [[ADD_LOW2]](p0) :: (store (s32) into @var3)
; CHECK: G_STORE [[C5]](s32), [[ADD_LOW1]](p0) :: (store (s32) into @var1)
; CHECK: bb.2.if.end:
; CHECK: [[C6:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; CHECK: $w0 = COPY [[C6]](s32)
@ -462,17 +462,17 @@ body: |
%7:gpr64(p0) = ADRP target-flags(aarch64-page) @var3
%addlow3:gpr(p0) = G_ADD_LOW %7(p0), target-flags(aarch64-pageoff, aarch64-nc) @var3
%8:gpr(s32) = G_CONSTANT i32 0
%0:gpr(s32) = G_LOAD %1(p0) :: (load 4 from @var1)
%0:gpr(s32) = G_LOAD %1(p0) :: (load (s32) from @var1)
%9:gpr(s32) = G_ICMP intpred(eq), %0(s32), %2
%3:gpr(s1) = G_TRUNC %9(s32)
G_BRCOND %3(s1), %bb.2
G_BR %bb.3
bb.2.if.then:
G_STORE %4(s32), %addlow2(p0) :: (store 4 into @var2)
G_STORE %6(s32), %addlow1(p0) :: (store 4 into @var1)
G_STORE %4(s32), %addlow3(p0) :: (store 4 into @var3)
G_STORE %6(s32), %addlow1(p0) :: (store 4 into @var1)
G_STORE %4(s32), %addlow2(p0) :: (store (s32) into @var2)
G_STORE %6(s32), %addlow1(p0) :: (store (s32) into @var1)
G_STORE %4(s32), %addlow3(p0) :: (store (s32) into @var3)
G_STORE %6(s32), %addlow1(p0) :: (store (s32) into @var1)
bb.3.if.end:
$w0 = COPY %8(s32)
@ -503,7 +503,7 @@ body: |
; CHECK: G_BR %bb.2
; CHECK: bb.1:
; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY]]
; CHECK: G_STORE [[ADD]](s32), [[COPY1]](p0) :: (store 4)
; CHECK: G_STORE [[ADD]](s32), [[COPY1]](p0) :: (store (s32))
; CHECK: [[C3:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 128
; CHECK: [[INTTOPTR2:%[0-9]+]]:gpr(p0) = G_INTTOPTR [[C3]](s64)
; CHECK: $x0 = COPY [[INTTOPTR2]](p0)
@ -530,7 +530,7 @@ body: |
bb.2:
%8:gpr(s32) = G_ADD %0, %0
G_STORE %8(s32), %1(p0) :: (store 4)
G_STORE %8(s32), %1(p0) :: (store (s32))
$x0 = COPY %3(p0)
RET_ReallyLR implicit $x0


@ -23,13 +23,13 @@ body: |
; CHECK-LABEL: name: ld_zext_i24
; CHECK: liveins: $x0, $x1
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[COPY]](p0) :: (load 3 from %ir.ptr, align 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[COPY]](p0) :: (load (s24) from %ir.ptr, align 1)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
; CHECK: $w0 = COPY [[ZEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(p0) = COPY $x1
%2:_(s24) = G_LOAD %0(p0) :: (load 3 from %ir.ptr, align 1)
%2:_(s24) = G_LOAD %0(p0) :: (load (s24) from %ir.ptr, align 1)
%3:_(s32) = G_ZEXT %2(s24)
$w0 = COPY %3(s32)
RET_ReallyLR implicit $w0


@ -105,7 +105,7 @@ body: |
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C1]]
; CHECK: G_STORE [[AND]](s32), [[COPY1]](p0) :: (store 4)
; CHECK: G_STORE [[AND]](s32), [[COPY1]](p0) :: (store (s32))
; CHECK: $w0 = COPY [[AND1]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(s32) = COPY $w0
@ -114,7 +114,7 @@ body: |
%4:_(s32) = G_CONSTANT i32 -128
%3:_(s32) = G_AND %0, %2
%5:_(s32) = G_AND %3, %4
G_STORE %3(s32), %1(p0) :: (store 4)
G_STORE %3(s32), %1(p0) :: (store (s32))
$w0 = COPY %5(s32)
RET_ReallyLR implicit $w0


@ -13,10 +13,10 @@ body: |
; CHECK-LABEL: name: sextload
; CHECK: liveins: $x0
; CHECK: %x0:_(p0) = COPY $x0
; CHECK: %sextload:_(s32) = G_SEXTLOAD %x0(p0) :: (load 2)
; CHECK: %sextload:_(s32) = G_SEXTLOAD %x0(p0) :: (load (s16))
; CHECK: $w0 = COPY %sextload(s32)
%x0:_(p0) = COPY $x0
%sextload:_(s32) = G_SEXTLOAD %x0:_(p0) :: (load 2)
%sextload:_(s32) = G_SEXTLOAD %x0:_(p0) :: (load (s16))
%sext_inreg:_(s32) = G_SEXT_INREG %sextload:_(s32), 24
$w0 = COPY %sext_inreg(s32)
...


@ -19,7 +19,7 @@ body: |
; CHECK: RET_ReallyLR
%0:_(p0) = COPY $x0
%1:_(s32) = G_IMPLICIT_DEF
G_STORE %1(s32), %0(p0) :: (store 4)
G_STORE %1(s32), %0(p0) :: (store (s32))
RET_ReallyLR
...


@ -10,11 +10,11 @@ body: |
; CHECK-LABEL: name: truncstore_s8
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: %val:_(s32) = COPY $w1
; CHECK: G_STORE %val(s32), %ptr(p0) :: (store 1)
; CHECK: G_STORE %val(s32), %ptr(p0) :: (store (s8))
%ptr:_(p0) = COPY $x0
%val:_(s32) = COPY $w1
%trunc:_(s8) = G_TRUNC %val
G_STORE %trunc(s8), %ptr(p0) :: (store 1)
G_STORE %trunc(s8), %ptr(p0) :: (store (s8))
...
---
name: truncstore_vector
@ -26,9 +26,9 @@ body: |
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: %val:_(<4 x s32>) = COPY $q0
; CHECK: %trunc:_(<4 x s8>) = G_TRUNC %val(<4 x s32>)
; CHECK: G_STORE %trunc(<4 x s8>), %ptr(p0) :: (store 4)
; CHECK: G_STORE %trunc(<4 x s8>), %ptr(p0) :: (store (<4 x s8>))
%ptr:_(p0) = COPY $x0
%val:_(<4 x s32>) = COPY $q0
%trunc:_(<4 x s8>) = G_TRUNC %val
G_STORE %trunc(<4 x s8>), %ptr(p0) :: (store 4)
G_STORE %trunc(<4 x s8>), %ptr(p0) :: (store (<4 x s8>))
...


@ -22,10 +22,10 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_zeroext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load 1 from %ir.addr)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8) from %ir.addr)
; CHECK: $w0 = COPY [[ZEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
%1:_(s8) = G_LOAD %0 :: (load (s8) from %ir.addr)
%2:_(s32) = G_ZEXT %1
$w0 = COPY %2
...
@ -40,11 +40,11 @@ body: |
; because an anyexting load like s64 = G_LOAD %p (load 4) isn't legal.
; CHECK-LABEL: name: test_no_anyext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.addr)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.addr)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32)
; CHECK: $x0 = COPY [[ANYEXT]](s64)
%0:_(p0) = COPY $x0
%1:_(s32) = G_LOAD %0 :: (load 4 from %ir.addr)
%1:_(s32) = G_LOAD %0 :: (load (s32) from %ir.addr)
%2:_(s64) = G_ANYEXT %1
$x0 = COPY %2
...


@ -39,12 +39,12 @@ body: |
; CHECK: bb.1:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[SUBREG_TO_REG]], 60, 59
; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[UBFMXri]], 0, 0 :: (load 4)
; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[UBFMXri]], 0, 0 :: (load (s32))
; CHECK: [[COPY6:%[0-9]+]]:fpr32 = COPY [[DEF]]
; CHECK: [[FMULSrr:%[0-9]+]]:fpr32 = FMULSrr [[COPY6]], [[LDRSroX]]
; CHECK: [[COPY7:%[0-9]+]]:fpr32 = COPY [[DEF]]
; CHECK: [[FADDSrr:%[0-9]+]]:fpr32 = FADDSrr [[FMULSrr]], [[COPY7]]
; CHECK: STRSui [[FADDSrr]], [[COPY2]], 0 :: (store 4)
; CHECK: STRSui [[FADDSrr]], [[COPY2]], 0 :: (store (s32))
; CHECK: bb.2:
; CHECK: RET_ReallyLR
bb.1:
@ -70,12 +70,12 @@ body: |
bb.2:
%12:gpr64 = UBFMXri %8, 60, 59
%15:fpr32 = LDRSroX %0, %12, 0, 0 :: (load 4)
%15:fpr32 = LDRSroX %0, %12, 0, 0 :: (load (s32))
%31:fpr32 = COPY %3
%16:fpr32 = FMULSrr %31, %15
%32:fpr32 = COPY %3
%17:fpr32 = FADDSrr %16, %32
STRSui %17, %2, 0 :: (store 4)
STRSui %17, %2, 0 :: (store (s32))
bb.3:
RET_ReallyLR


@ -30,7 +30,7 @@ body: |
; CHECK: [[UDIV:%[0-9]+]]:_(s64) = G_UDIV [[FREEZE]], [[C]]
; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[C1]](s64)
; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SHL]], [[UDIV]]
; CHECK: G_STORE [[ADD]](s64), [[COPY]](p0) :: (store 8)
; CHECK: G_STORE [[ADD]](s64), [[COPY]](p0) :: (store (s64))
; CHECK: bb.2:
bb.1:
liveins: $x0
@ -58,7 +58,7 @@ body: |
%16:_(s64) = G_UDIV %15, %4
%17:_(s64) = G_SHL %7, %12(s64)
%18:_(s64) = G_ADD %17, %16
G_STORE %18(s64), %0(p0) :: (store 8)
G_STORE %18(s64), %0(p0) :: (store (s64))
bb.3:


@ -93,7 +93,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 2
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(sge), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -115,7 +115,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 3
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(sgt), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -137,7 +137,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 1
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(sle), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -160,7 +160,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 -1
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(slt), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -182,7 +182,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 2
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(uge), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -204,7 +204,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 -1
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(ugt), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -226,7 +226,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 1
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(ule), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -248,7 +248,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 0
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(ule), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -270,7 +270,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 0
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(eq), %load_eq_1(s32), %cst
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -314,7 +314,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 -1
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(sge), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -336,7 +336,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 1
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(sgt), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -358,7 +358,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 3
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(sle), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -381,7 +381,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 2
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(slt), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -403,7 +403,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 0
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(uge), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -425,7 +425,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 1
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(ugt), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -447,7 +447,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 -1
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(ule), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -469,7 +469,7 @@ body: |
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 2
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !0)
%load_eq_1:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !0)
%cmp:_(s1) = G_ICMP intpred(ule), %cst, %load_eq_1(s32)
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -487,14 +487,14 @@ body: |
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: %cst:_(s32) = G_CONSTANT i32 1
; CHECK: %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load 4,
; CHECK: %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32),
; CHECK: %cmp:_(s1) = G_ICMP intpred(eq), %load_between_1_2(s32), %cst
; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
; CHECK: $w0 = COPY %cmp_ext(s32)
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 1
%load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !1)
%load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !1)
%cmp:_(s1) = G_ICMP intpred(eq), %load_between_1_2(s32), %cst
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)
@ -512,14 +512,14 @@ body: |
; CHECK: liveins: $x0
; CHECK: %ptr:_(p0) = COPY $x0
; CHECK: %cst:_(s32) = G_CONSTANT i32 1
; CHECK: %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load 4,
; CHECK: %load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32),
; CHECK: %cmp:_(s1) = G_ICMP intpred(ne), %load_between_1_2(s32), %cst
; CHECK: %cmp_ext:_(s32) = G_ZEXT %cmp(s1)
; CHECK: $w0 = COPY %cmp_ext(s32)
; CHECK: RET_ReallyLR implicit $w0
%ptr:_(p0) = COPY $x0
%cst:_(s32) = G_CONSTANT i32 1
%load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load 4, !range !1)
%load_between_1_2:_(s32) = G_LOAD %ptr(p0) :: (load (s32), !range !1)
%cmp:_(s1) = G_ICMP intpred(ne), %load_between_1_2(s32), %cst
%cmp_ext:_(s32) = G_ZEXT %cmp(s1)
$w0 = COPY %cmp_ext(s32)


@ -15,7 +15,7 @@ body: |
; NOT_STRICT-LABEL: name: misaligned
; NOT_STRICT: liveins: $x0, $x1
; NOT_STRICT: %ptr:_(p0) = COPY $x1
; NOT_STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 2)
; NOT_STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
; NOT_STRICT: $w1 = COPY %full_load(s32)
; NOT_STRICT: RET_ReallyLR implicit $w1
; STRICT-LABEL: name: misaligned
@ -24,8 +24,8 @@ body: |
; STRICT: %cst_16:_(s32) = G_CONSTANT i32 16
; STRICT: %ptr:_(p0) = COPY $x1
; STRICT: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; STRICT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; STRICT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; STRICT: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; STRICT: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; STRICT: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; STRICT: %full_load:_(s32) = G_OR %low_half, %high_half
; STRICT: $w1 = COPY %full_load(s32)
@ -36,8 +36,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2, align 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2, align 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16), align 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), align 2)
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -55,13 +55,13 @@ body: |
; NOT_STRICT-LABEL: name: aligned
; NOT_STRICT: liveins: $x0, $x1
; NOT_STRICT: %ptr:_(p0) = COPY $x1
; NOT_STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4)
; NOT_STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
; NOT_STRICT: $w1 = COPY %full_load(s32)
; NOT_STRICT: RET_ReallyLR implicit $w1
; STRICT-LABEL: name: aligned
; STRICT: liveins: $x0, $x1
; STRICT: %ptr:_(p0) = COPY $x1
; STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4)
; STRICT: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32))
; STRICT: $w1 = COPY %full_load(s32)
; STRICT: RET_ReallyLR implicit $w1
%cst_1:_(s64) = G_CONSTANT i64 1
@ -70,8 +70,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2, align 4)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2, align 4)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16), align 4)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), align 4)
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half


@ -28,13 +28,13 @@ body: |
; LITTLE-LABEL: name: s8_loads_to_s32_little_endian_pat
; LITTLE: liveins: $x0, $x1
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 1)
; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
; BIG-LABEL: name: s8_loads_to_s32_little_endian_pat
; BIG: liveins: $x0, $x1
; BIG: %ptr:_(p0) = COPY $x1
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 1)
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
@ -51,11 +51,11 @@ body: |
%ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s32)
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
%byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
%byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
@ -94,14 +94,14 @@ body: |
; LITTLE-LABEL: name: s8_loads_to_s32_big_endian_pat
; LITTLE: liveins: $x0, $x1
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 1)
; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
; LITTLE: %full_load:_(s32) = G_BSWAP [[LOAD]]
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
; BIG-LABEL: name: s8_loads_to_s32_big_endian_pat
; BIG: liveins: $x0, $x1
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 1)
; BIG: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
%cst_1:_(s32) = G_CONSTANT i32 1
@ -117,14 +117,14 @@ body: |
%ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s32)
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
%elt0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
%elt0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
%byte0:_(s32) = nuw G_SHL %elt0, %cst_24(s32)
%byte1:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%byte2:_(s32) = nuw G_SHL %elt2, %cst_8(s32)
%byte3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
%byte3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
%or1:_(s32) = G_OR %byte0, %byte1
%or2:_(s32) = G_OR %byte2, %byte3
@ -152,13 +152,13 @@ body: |
; LITTLE-LABEL: name: different_or_pattern
; LITTLE: liveins: $x0, $x1
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 1)
; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
; BIG-LABEL: name: different_or_pattern
; BIG: liveins: $x0, $x1
; BIG: %ptr:_(p0) = COPY $x1
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 1)
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 1)
; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
@ -175,11 +175,11 @@ body: |
%ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s32)
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
%byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
%byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
@ -219,13 +219,13 @@ body: |
; LITTLE-LABEL: name: s16_loads_to_s32_little_endian_pat
; LITTLE: liveins: $x0, $x1
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 2)
; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
; BIG-LABEL: name: s16_loads_to_s32_little_endian_pat
; BIG: liveins: $x0, $x1
; BIG: %ptr:_(p0) = COPY $x1
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 2)
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
@ -235,8 +235,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -260,14 +260,14 @@ body: |
; LITTLE-LABEL: name: s16_loads_to_s32_big_endian_pat
; LITTLE: liveins: $x0, $x1
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 2)
; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
; LITTLE: %full_load:_(s32) = G_BSWAP [[LOAD]]
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
; BIG-LABEL: name: s16_loads_to_s32_big_endian_pat
; BIG: liveins: $x0, $x1
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 2)
; BIG: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
%cst_1:_(s64) = G_CONSTANT i64 1
@ -276,9 +276,9 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%elt0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%elt0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt0, %cst_16(s32)
%low_half:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%full_load:_(s32) = G_OR %low_half, %high_half
$w1 = COPY %full_load(s32)
@ -301,13 +301,13 @@ body: |
; LITTLE-LABEL: name: s16_loads_to_s64_little_endian_pat
; LITTLE: liveins: $x0, $x1
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %full_load:_(s64) = G_LOAD %ptr(p0) :: (load 8, align 2)
; LITTLE: %full_load:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
; LITTLE: $x1 = COPY %full_load(s64)
; LITTLE: RET_ReallyLR implicit $x1
; BIG-LABEL: name: s16_loads_to_s64_little_endian_pat
; BIG: liveins: $x0, $x1
; BIG: %ptr:_(p0) = COPY $x1
; BIG: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load 8, align 2)
; BIG: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
; BIG: %full_load:_(s64) = G_BSWAP [[LOAD]]
; BIG: $x1 = COPY %full_load(s64)
; BIG: RET_ReallyLR implicit $x1
@ -324,11 +324,11 @@ body: |
%ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
%byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%elt2:_(s64) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 2)
%elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 2)
%elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%elt2:_(s64) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s16))
%elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
%byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_16(s64)
%byte4_byte5:_(s64) = nuw G_SHL %elt2, %cst_32(s64)
@ -358,14 +358,14 @@ body: |
; LITTLE-LABEL: name: s16_loads_to_s64_big_endian_pat
; LITTLE: liveins: $x0, $x1
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load 8, align 2)
; LITTLE: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
; LITTLE: %full_load:_(s64) = G_BSWAP [[LOAD]]
; LITTLE: $x1 = COPY %full_load(s64)
; LITTLE: RET_ReallyLR implicit $x1
; BIG-LABEL: name: s16_loads_to_s64_big_endian_pat
; BIG: liveins: $x0, $x1
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %full_load:_(s64) = G_LOAD %ptr(p0) :: (load 8, align 2)
; BIG: %full_load:_(s64) = G_LOAD %ptr(p0) :: (load (s64), align 2)
; BIG: $x1 = COPY %full_load(s64)
; BIG: RET_ReallyLR implicit $x1
%cst_1:_(s64) = G_CONSTANT i64 1
@ -381,14 +381,14 @@ body: |
%ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s64)
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
%elt0:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%elt2:_(s64) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 2)
%elt0:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%elt2:_(s64) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s16))
%byte0_byte1:_(s64) = nuw G_SHL %elt0, %cst_48(s64)
%byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_32(s64)
%byte4_byte5:_(s64) = nuw G_SHL %elt2, %cst_16(s64)
%byte6_byte7:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 2)
%byte6_byte7:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
%or1:_(s64) = G_OR %byte0_byte1, %byte2_byte3
%or2:_(s64) = G_OR %byte4_byte5, %byte6_byte7
@ -417,7 +417,7 @@ body: |
; LITTLE: %cst_1:_(s32) = G_CONSTANT i32 1
; LITTLE: %ptr:_(p0) = COPY $x0
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; LITTLE: %full_load:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load 4, align 1)
; LITTLE: %full_load:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
; BIG-LABEL: name: nonzero_start_idx_positive_little_endian_pat
@ -425,7 +425,7 @@ body: |
; BIG: %cst_1:_(s32) = G_CONSTANT i32 1
; BIG: %ptr:_(p0) = COPY $x0
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load 4, align 1)
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
@ -444,11 +444,11 @@ body: |
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
%ptr_elt_4:_(p0) = G_PTR_ADD %ptr, %cst_4(s32)
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
%elt4:_(s32) = G_ZEXTLOAD %ptr_elt_4(p0) :: (load 1)
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
%elt4:_(s32) = G_ZEXTLOAD %ptr_elt_4(p0) :: (load (s8))
%byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%byte1:_(s32) = nuw G_SHL %elt2, %cst_8(s32)
%byte2:_(s32) = nuw G_SHL %elt3, %cst_16(s32)
%byte3:_(s32) = nuw G_SHL %elt4, %cst_24(s32)
@ -479,7 +479,7 @@ body: |
; LITTLE: %cst_1:_(s32) = G_CONSTANT i32 1
; LITTLE: %ptr:_(p0) = COPY $x0
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load 4, align 1)
; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
; LITTLE: %full_load:_(s32) = G_BSWAP [[LOAD]]
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
@ -488,7 +488,7 @@ body: |
; BIG: %cst_1:_(s32) = G_CONSTANT i32 1
; BIG: %ptr:_(p0) = COPY $x0
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; BIG: %full_load:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load 4, align 1)
; BIG: %full_load:_(s32) = G_LOAD %ptr_elt_1(p0) :: (load (s32), align 1)
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
%cst_1:_(s32) = G_CONSTANT i32 1
@ -506,11 +506,11 @@ body: |
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
%ptr_elt_4:_(p0) = G_PTR_ADD %ptr, %cst_4(s32)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
%byte0:_(s32) = G_ZEXTLOAD %ptr_elt_4(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr_elt_4(p0) :: (load (s8))
%byte1:_(s32) = nuw G_SHL %elt3, %cst_8(s32)
%byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
%byte3:_(s32) = nuw G_SHL %elt1, %cst_24(s32)
@ -541,7 +541,7 @@ body: |
; LITTLE: %cst_neg_3:_(s32) = G_CONSTANT i32 -3
; LITTLE: %ptr:_(p0) = COPY $x0
; LITTLE: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s32)
; LITTLE: %full_load:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load 4, align 1)
; LITTLE: %full_load:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
; BIG-LABEL: name: nonzero_start_idx_negative_little_endian_pat
@ -549,7 +549,7 @@ body: |
; BIG: %cst_neg_3:_(s32) = G_CONSTANT i32 -3
; BIG: %ptr:_(p0) = COPY $x0
; BIG: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s32)
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load 4, align 1)
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
@ -566,11 +566,11 @@ body: |
%ptr_elt_neg_2:_(p0) = G_PTR_ADD %ptr, %cst_neg_2(s32)
%ptr_elt_neg_1:_(p0) = G_PTR_ADD %ptr, %cst_neg_1(s32)
%elt_neg_2:_(s32) = G_ZEXTLOAD %ptr_elt_neg_2(p0) :: (load 1)
%elt_neg_1:_(s32) = G_ZEXTLOAD %ptr_elt_neg_1(p0) :: (load 1)
%elt_0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%elt_neg_2:_(s32) = G_ZEXTLOAD %ptr_elt_neg_2(p0) :: (load (s8))
%elt_neg_1:_(s32) = G_ZEXTLOAD %ptr_elt_neg_1(p0) :: (load (s8))
%elt_0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%byte0:_(s32) = G_ZEXTLOAD %ptr_elt_neg_3(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr_elt_neg_3(p0) :: (load (s8))
%byte1:_(s32) = nuw G_SHL %elt_neg_2, %cst_8(s32)
%byte2:_(s32) = nuw G_SHL %elt_neg_1, %cst_16(s32)
%byte3:_(s32) = nuw G_SHL %elt_0, %cst_24(s32)
@ -601,7 +601,7 @@ body: |
; LITTLE: %cst_neg_3:_(s32) = G_CONSTANT i32 -3
; LITTLE: %ptr:_(p0) = COPY $x0
; LITTLE: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s32)
; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load 4, align 1)
; LITTLE: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
; LITTLE: %full_load:_(s32) = G_BSWAP [[LOAD]]
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
@ -610,7 +610,7 @@ body: |
; BIG: %cst_neg_3:_(s32) = G_CONSTANT i32 -3
; BIG: %ptr:_(p0) = COPY $x0
; BIG: %ptr_elt_neg_3:_(p0) = G_PTR_ADD %ptr, %cst_neg_3(s32)
; BIG: %full_load:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load 4, align 1)
; BIG: %full_load:_(s32) = G_LOAD %ptr_elt_neg_3(p0) :: (load (s32), align 1)
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
%cst_neg_1:_(s32) = G_CONSTANT i32 -1
@ -626,12 +626,12 @@ body: |
%ptr_elt_neg_2:_(p0) = G_PTR_ADD %ptr, %cst_neg_2(s32)
%ptr_elt_neg_1:_(p0) = G_PTR_ADD %ptr, %cst_neg_1(s32)
%elt_neg_3:_(s32) = G_ZEXTLOAD %ptr_elt_neg_3(p0) :: (load 1)
%elt_neg_2:_(s32) = G_ZEXTLOAD %ptr_elt_neg_2(p0) :: (load 1)
%elt_neg_1:_(s32) = G_ZEXTLOAD %ptr_elt_neg_1(p0) :: (load 1)
%elt_0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%elt_neg_3:_(s32) = G_ZEXTLOAD %ptr_elt_neg_3(p0) :: (load (s8))
%elt_neg_2:_(s32) = G_ZEXTLOAD %ptr_elt_neg_2(p0) :: (load (s8))
%elt_neg_1:_(s32) = G_ZEXTLOAD %ptr_elt_neg_1(p0) :: (load (s8))
%elt_0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%byte1:_(s32) = nuw G_SHL %elt_neg_1, %cst_8(s32)
%byte2:_(s32) = nuw G_SHL %elt_neg_2, %cst_16(s32)
%byte3:_(s32) = nuw G_SHL %elt_neg_3, %cst_24(s32)
@ -659,8 +659,8 @@ body: |
; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load 2)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load (s16))
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: $w1 = COPY %full_load(s32)
@ -671,8 +671,8 @@ body: |
; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load 2)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load (s16))
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: $w1 = COPY %full_load(s32)
@ -683,8 +683,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (volatile load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -707,8 +707,8 @@ body: |
; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
; LITTLE: %high_half:_(s32) = nuw G_SHL %wrong_size_load, %cst_16(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: $w1 = COPY %full_load(s32)
@ -719,8 +719,8 @@ body: |
; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
; BIG: %high_half:_(s32) = nuw G_SHL %wrong_size_load, %cst_16(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: $w1 = COPY %full_load(s32)
@ -731,8 +731,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%wrong_size_load:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%high_half:_(s32) = nuw G_SHL %wrong_size_load, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -758,8 +758,8 @@ body: |
; LITTLE: %cst_24:_(s32) = G_CONSTANT i32 24
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_24(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: $w1 = COPY %full_load(s32)
@ -770,8 +770,8 @@ body: |
; BIG: %cst_24:_(s32) = G_CONSTANT i32 24
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_24(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: $w1 = COPY %full_load(s32)
@ -782,8 +782,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_24(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -809,8 +809,8 @@ body: |
; LITTLE: %cst_8:_(s32) = G_CONSTANT i32 8
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: $w1 = COPY %full_load(s32)
@ -821,8 +821,8 @@ body: |
; BIG: %cst_8:_(s32) = G_CONSTANT i32 8
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: $w1 = COPY %full_load(s32)
@ -833,8 +833,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -863,9 +863,9 @@ body: |
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
; LITTLE: %byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; LITTLE: %elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 2)
; LITTLE: %byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; LITTLE: %elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
; LITTLE: %byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_16(s64)
; LITTLE: %byte6_byte7:_(s64) = nuw G_SHL %elt3, %cst_48(s64)
; LITTLE: %or1:_(s64) = G_OR %byte0_byte1, %byte2_byte3
@ -881,9 +881,9 @@ body: |
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
; BIG: %byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; BIG: %elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 2)
; BIG: %byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; BIG: %elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
; BIG: %byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_16(s64)
; BIG: %byte6_byte7:_(s64) = nuw G_SHL %elt3, %cst_48(s64)
; BIG: %or1:_(s64) = G_OR %byte0_byte1, %byte2_byte3
@ -900,10 +900,10 @@ body: |
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s64)
%byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%byte0_byte1:_(s64) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 2)
%elt1:_(s64) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%elt3:_(s64) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s16))
%byte2_byte3:_(s64) = nuw G_SHL %elt1, %cst_16(s64)
%byte6_byte7:_(s64) = nuw G_SHL %elt3, %cst_48(s64)
@ -930,8 +930,8 @@ body: |
; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2, addrspace 1)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), addrspace 1)
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: $w1 = COPY %full_load(s32)
@ -942,8 +942,8 @@ body: |
; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2, addrspace 1)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), addrspace 1)
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: $w1 = COPY %full_load(s32)
@ -954,8 +954,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2, addrspace 0)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2, addrspace 1)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16), addrspace 0)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16), addrspace 1)
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -986,10 +986,10 @@ body: |
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; LITTLE: %uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s32)
; LITTLE: %also_uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s32)
; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load 1)
; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load 1)
; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load (s8))
; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load (s8))
; LITTLE: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
; LITTLE: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
; LITTLE: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
@ -1009,10 +1009,10 @@ body: |
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; BIG: %uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s32)
; BIG: %also_uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s32)
; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
; BIG: %elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load 1)
; BIG: %elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load 1)
; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
; BIG: %elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load (s8))
; BIG: %elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load (s8))
; BIG: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
; BIG: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
; BIG: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
@ -1033,11 +1033,11 @@ body: |
%uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s32)
%also_uses_idx_2:_(p0) = G_PTR_ADD %ptr, %reused_idx(s32)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load 1)
%elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load 1)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%elt2:_(s32) = G_ZEXTLOAD %uses_idx_2(p0) :: (load (s8))
%elt3:_(s32) = G_ZEXTLOAD %also_uses_idx_2(p0) :: (load (s8))
%byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
%byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
@ -1073,10 +1073,10 @@ body: |
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; LITTLE: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s32)
; LITTLE: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
; LITTLE: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
; LITTLE: %duplicate_shl_1:_(s32) = nuw G_SHL %elt2, %duplicate_shl_cst(s32)
; LITTLE: %duplicate_shl_2:_(s32) = nuw G_SHL %elt3, %duplicate_shl_cst(s32)
@ -1096,10 +1096,10 @@ body: |
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; BIG: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s32)
; BIG: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
; BIG: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
; BIG: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
; BIG: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
; BIG: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
; BIG: %byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
; BIG: %duplicate_shl_1:_(s32) = nuw G_SHL %elt2, %duplicate_shl_cst(s32)
; BIG: %duplicate_shl_2:_(s32) = nuw G_SHL %elt3, %duplicate_shl_cst(s32)
@ -1120,11 +1120,11 @@ body: |
%ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s32)
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
%byte1:_(s32) = nuw G_SHL %elt1, %cst_8(s32)
%duplicate_shl_1:_(s32) = nuw G_SHL %elt2, %duplicate_shl_cst(s32)
@ -1163,10 +1163,10 @@ body: |
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; LITTLE: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s32)
; LITTLE: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
; LITTLE: %lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
; LITTLE: %lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
; LITTLE: %byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
; LITTLE: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
; LITTLE: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
; LITTLE: %byte1:_(s32) = nuw G_SHL %lowest_idx_load, %cst_8(s32)
; LITTLE: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
; LITTLE: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
@ -1187,10 +1187,10 @@ body: |
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s32)
; BIG: %ptr_elt_2:_(p0) = G_PTR_ADD %ptr, %cst_2(s32)
; BIG: %ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
; BIG: %lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
; BIG: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
; BIG: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
; BIG: %lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
; BIG: %byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
; BIG: %elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
; BIG: %elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
; BIG: %byte1:_(s32) = nuw G_SHL %lowest_idx_load, %cst_8(s32)
; BIG: %byte2:_(s32) = nuw G_SHL %elt2, %cst_16(s32)
; BIG: %byte3:_(s32) = nuw G_SHL %elt3, %cst_24(s32)
@ -1213,10 +1213,10 @@ body: |
%ptr_elt_3:_(p0) = G_PTR_ADD %ptr, %cst_3(s32)
; This load is index 0
%lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 1)
%byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 1)
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load 1)
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load 1)
%lowest_idx_load:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s8))
%byte0:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s8))
%elt2:_(s32) = G_ZEXTLOAD %ptr_elt_2(p0) :: (load (s8))
%elt3:_(s32) = G_ZEXTLOAD %ptr_elt_3(p0) :: (load (s8))
; ... But it ends up being shifted, so we shouldn't combine.
%byte1:_(s32) = nuw G_SHL %lowest_idx_load, %cst_8(s32)
@ -1247,8 +1247,8 @@ body: |
; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: %extra_use:_(s32) = G_AND %full_load, %low_half
@ -1260,8 +1260,8 @@ body: |
; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: %extra_use:_(s32) = G_AND %full_load, %low_half
@ -1273,8 +1273,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -1299,8 +1299,8 @@ body: |
; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: %extra_use:_(s32) = G_AND %full_load, %high_half
@ -1312,8 +1312,8 @@ body: |
; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: %extra_use:_(s32) = G_AND %full_load, %high_half
@ -1325,8 +1325,8 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -1349,11 +1349,11 @@ body: |
; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: %other_ptr:_(p0) = COPY $x1
; LITTLE: %some_val:_(s32) = G_CONSTANT i32 12
; LITTLE: G_STORE %some_val(s32), %other_ptr(p0) :: (store 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; LITTLE: G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: $w1 = COPY %full_load(s32)
@ -1364,11 +1364,11 @@ body: |
; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: %other_ptr:_(p0) = COPY $x1
; BIG: %some_val:_(s32) = G_CONSTANT i32 12
; BIG: G_STORE %some_val(s32), %other_ptr(p0) :: (store 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; BIG: G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: $w1 = COPY %full_load(s32)
@ -1378,14 +1378,14 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; Memory could be modified here, so don't combine!
%other_ptr:_(p0) = COPY $x1
%some_val:_(s32) = G_CONSTANT i32 12
G_STORE %some_val, %other_ptr :: (store 2)
G_STORE %some_val, %other_ptr :: (store (s16))
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -1408,16 +1408,16 @@ body: |
; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: bb.1:
; LITTLE: successors: %bb.2(0x80000000)
; LITTLE: liveins: $x0, $x1
; LITTLE: %other_ptr:_(p0) = COPY $x1
; LITTLE: %some_val:_(s32) = G_CONSTANT i32 12
; LITTLE: G_STORE %some_val(s32), %other_ptr(p0) :: (store 2)
; LITTLE: G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
; LITTLE: bb.2:
; LITTLE: liveins: $x0, $x1
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: $w1 = COPY %full_load(s32)
@ -1430,16 +1430,16 @@ body: |
; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: bb.1:
; BIG: successors: %bb.2(0x80000000)
; BIG: liveins: $x0, $x1
; BIG: %other_ptr:_(p0) = COPY $x1
; BIG: %some_val:_(s32) = G_CONSTANT i32 12
; BIG: G_STORE %some_val(s32), %other_ptr(p0) :: (store 2)
; BIG: G_STORE %some_val(s32), %other_ptr(p0) :: (store (s16))
; BIG: bb.2:
; BIG: liveins: $x0, $x1
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: $w1 = COPY %full_load(s32)
@ -1455,7 +1455,7 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
bb.1:
liveins: $x0, $x1
@ -1463,11 +1463,11 @@ body: |
; Memory could be modified here, so don't combine!
%other_ptr:_(p0) = COPY $x1
%some_val:_(s32) = G_CONSTANT i32 12
G_STORE %some_val, %other_ptr :: (store 2)
G_STORE %some_val, %other_ptr :: (store (s16))
bb.2:
liveins: $x0, $x1
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -1489,10 +1489,10 @@ body: |
; LITTLE: %cst_16:_(s32) = G_CONSTANT i32 16
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; LITTLE: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; LITTLE: bb.1:
; LITTLE: liveins: $x0, $x1
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; LITTLE: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; LITTLE: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; LITTLE: %full_load:_(s32) = G_OR %low_half, %high_half
; LITTLE: $w1 = COPY %full_load(s32)
@ -1505,10 +1505,10 @@ body: |
; BIG: %cst_16:_(s32) = G_CONSTANT i32 16
; BIG: %ptr:_(p0) = COPY $x1
; BIG: %ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
; BIG: %low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
; BIG: bb.1:
; BIG: liveins: $x0, $x1
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
; BIG: %elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
; BIG: %high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
; BIG: %full_load:_(s32) = G_OR %low_half, %high_half
; BIG: $w1 = COPY %full_load(s32)
@ -1523,11 +1523,11 @@ body: |
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
bb.1:
liveins: $x0, $x1
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half
@ -1546,24 +1546,24 @@ body: |
; LITTLE-LABEL: name: load_first
; LITTLE: liveins: $x0, $x1
; LITTLE: %ptr:_(p0) = COPY $x1
; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 2)
; LITTLE: %full_load:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
; LITTLE: $w1 = COPY %full_load(s32)
; LITTLE: RET_ReallyLR implicit $w1
; BIG-LABEL: name: load_first
; BIG: liveins: $x0, $x1
; BIG: %ptr:_(p0) = COPY $x1
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load 4, align 2)
; BIG: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD %ptr(p0) :: (load (s32), align 2)
; BIG: %full_load:_(s32) = G_BSWAP [[LOAD]]
; BIG: $w1 = COPY %full_load(s32)
; BIG: RET_ReallyLR implicit $w1
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load 2)
%low_half:_(s32) = G_ZEXTLOAD %ptr(p0) :: (load (s16))
%cst_1:_(s64) = G_CONSTANT i64 1
%cst_16:_(s32) = G_CONSTANT i32 16
%ptr:_(p0) = COPY $x1
%ptr_elt_1:_(p0) = G_PTR_ADD %ptr, %cst_1(s64)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load 2)
%elt1:_(s32) = G_ZEXTLOAD %ptr_elt_1(p0) :: (load (s16))
%high_half:_(s32) = nuw G_SHL %elt1, %cst_16(s32)
%full_load:_(s32) = G_OR %low_half, %high_half

@ -24,19 +24,19 @@ body: |
; DARWIN: liveins: $x0, $x1
; DARWIN: %ptr:_(p0) = COPY $x0
; DARWIN: %width:_(s64) = COPY $x1
; DARWIN: G_BZERO %ptr(p0), %width(s64), 0 :: (store 4)
; DARWIN: G_BZERO %ptr(p0), %width(s64), 0 :: (store (s32))
; DARWIN: RET_ReallyLR
; UNKNOWN-LABEL: name: bzero_unknown_width
; UNKNOWN: liveins: $x0, $x1
; UNKNOWN: %ptr:_(p0) = COPY $x0
; UNKNOWN: %zero:_(s8) = G_CONSTANT i8 0
; UNKNOWN: %width:_(s64) = COPY $x1
; UNKNOWN: G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store 4)
; UNKNOWN: G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store (s32))
; UNKNOWN: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%zero:_(s8) = G_CONSTANT i8 0
%width:_(s64) = COPY $x1
G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store 4)
G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store (s32))
RET_ReallyLR
...
---
@ -49,19 +49,19 @@ body: |
; DARWIN: liveins: $x0, $x1
; DARWIN: %ptr:_(p0) = COPY $x0
; DARWIN: %width:_(s64) = COPY $x1
; DARWIN: G_BZERO %ptr(p0), %width(s64), 1 :: (store 4)
; DARWIN: G_BZERO %ptr(p0), %width(s64), 1 :: (store (s32))
; DARWIN: RET_ReallyLR
; UNKNOWN-LABEL: name: bzero_tail_unknown_width
; UNKNOWN: liveins: $x0, $x1
; UNKNOWN: %ptr:_(p0) = COPY $x0
; UNKNOWN: %zero:_(s8) = G_CONSTANT i8 0
; UNKNOWN: %width:_(s64) = COPY $x1
; UNKNOWN: G_MEMSET %ptr(p0), %zero(s8), %width(s64), 1 :: (store 4)
; UNKNOWN: G_MEMSET %ptr(p0), %zero(s8), %width(s64), 1 :: (store (s32))
; UNKNOWN: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%zero:_(s8) = G_CONSTANT i8 0
%width:_(s64) = COPY $x1
G_MEMSET %ptr(p0), %zero(s8), %width(s64), 1 :: (store 4)
G_MEMSET %ptr(p0), %zero(s8), %width(s64), 1 :: (store (s32))
RET_ReallyLR
...
---
@ -76,19 +76,19 @@ body: |
; DARWIN: liveins: $x0, $x1
; DARWIN: %ptr:_(p0) = COPY $x0
; DARWIN: %width:_(s64) = G_CONSTANT i64 1024
; DARWIN: G_BZERO %ptr(p0), %width(s64), 0 :: (store 4)
; DARWIN: G_BZERO %ptr(p0), %width(s64), 0 :: (store (s32))
; DARWIN: RET_ReallyLR
; UNKNOWN-LABEL: name: bzero_constant_width
; UNKNOWN: liveins: $x0, $x1
; UNKNOWN: %ptr:_(p0) = COPY $x0
; UNKNOWN: %zero:_(s8) = G_CONSTANT i8 0
; UNKNOWN: %width:_(s64) = G_CONSTANT i64 1024
; UNKNOWN: G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store 4)
; UNKNOWN: G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store (s32))
; UNKNOWN: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%zero:_(s8) = G_CONSTANT i8 0
%width:_(s64) = G_CONSTANT i64 1024
G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store 4)
G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store (s32))
RET_ReallyLR
...
---
@ -103,19 +103,19 @@ body: |
; DARWIN: liveins: $x0, $x1
; DARWIN: %ptr:_(p0) = COPY $x0
; DARWIN: %width:_(s64) = G_CONSTANT i64 256
; DARWIN: G_BZERO %ptr(p0), %width(s64), 0 :: (store 4)
; DARWIN: G_BZERO %ptr(p0), %width(s64), 0 :: (store (s32))
; DARWIN: RET_ReallyLR
; UNKNOWN-LABEL: name: bzero_constant_width_minsize
; UNKNOWN: liveins: $x0, $x1
; UNKNOWN: %ptr:_(p0) = COPY $x0
; UNKNOWN: %zero:_(s8) = G_CONSTANT i8 0
; UNKNOWN: %width:_(s64) = G_CONSTANT i64 256
; UNKNOWN: G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store 4)
; UNKNOWN: G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store (s32))
; UNKNOWN: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%zero:_(s8) = G_CONSTANT i8 0
%width:_(s64) = G_CONSTANT i64 256
G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store 4)
G_MEMSET %ptr(p0), %zero(s8), %width(s64), 0 :: (store (s32))
RET_ReallyLR
...
---
@ -131,17 +131,17 @@ body: |
; DARWIN: %ptr:_(p0) = COPY $x0
; DARWIN: %not_zero:_(s8) = G_CONSTANT i8 1
; DARWIN: %width:_(s64) = G_CONSTANT i64 256
; DARWIN: G_MEMSET %ptr(p0), %not_zero(s8), %width(s64), 0 :: (store 4)
; DARWIN: G_MEMSET %ptr(p0), %not_zero(s8), %width(s64), 0 :: (store (s32))
; DARWIN: RET_ReallyLR
; UNKNOWN-LABEL: name: not_zero
; UNKNOWN: liveins: $x0, $x1
; UNKNOWN: %ptr:_(p0) = COPY $x0
; UNKNOWN: %not_zero:_(s8) = G_CONSTANT i8 1
; UNKNOWN: %width:_(s64) = G_CONSTANT i64 256
; UNKNOWN: G_MEMSET %ptr(p0), %not_zero(s8), %width(s64), 0 :: (store 4)
; UNKNOWN: G_MEMSET %ptr(p0), %not_zero(s8), %width(s64), 0 :: (store (s32))
; UNKNOWN: RET_ReallyLR
%ptr:_(p0) = COPY $x0
%not_zero:_(s8) = G_CONSTANT i8 1
%width:_(s64) = G_CONSTANT i64 256
G_MEMSET %ptr(p0), %not_zero(s8), %width(s64), 0 :: (store 4)
G_MEMSET %ptr(p0), %not_zero(s8), %width(s64), 0 :: (store (s32))
RET_ReallyLR

@ -75,7 +75,7 @@ body: |
; CHECK: [[T0:%[0-9]+]]:_(s32) = G_SEXTLOAD
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
%2:_(s8) = G_LOAD %0 :: (load (s8) from %ir.addr)
%3:_(s32) = G_SEXT %2
%4:_(s32) = G_CONSTANT i32 1
%5:_(s1) = G_ICMP intpred(ne), %1:_(s32), %4:_
@ -112,15 +112,15 @@ body: |
# a test of the debug output and a test.
#
# CHECK-WORKLIST-LABEL: Generic MI Combiner for: multiple_copies
# CHECK-WORKLIST: Try combining [[IN0:%[0-9]+]]:_(s8) = G_LOAD [[IN1:%[0-9]+]]:_(p0){{.*}} :: (load 1 from %ir.addr)
# CHECK-WORKLIST: Try combining [[IN0:%[0-9]+]]:_(s8) = G_LOAD [[IN1:%[0-9]+]]:_(p0){{.*}} :: (load (s8) from %ir.addr)
# CHECK-WORKLIST: Preferred use is: [[IN2:%[0-9]+]]:_(s32) = G_SEXT [[IN0]]:_(s8)
# CHECK-WORKLIST-DAG: Changing: [[IN0]]:_(s8) = G_LOAD [[IN1]]:_(p0){{.*}} :: (load 1 from %ir.addr)
# CHECK-WORKLIST-DAG: Changing: [[IN0]]:_(s8) = G_LOAD [[IN1]]:_(p0){{.*}} :: (load (s8) from %ir.addr)
# CHECK-WORKLIST-DAG: Changing: [[IN3:%[0-9]+]]:_(s8) = G_ADD [[IN0]]:_, [[IN4:%[0-9]+]]:_
# CHECK-WORKLIST-DAG: Changed: [[IN3]]:_(s8) = G_ADD [[NEW1:%[0-9]+]]:_, [[IN4]]:_
# CHECK-WORKLIST-DAG: Changing: [[IN5:%[0-9]+]]:_(s8) = G_SUB [[IN0]]:_, [[IN6:%[0-9]+]]:_
# CHECK-WORKLIST-DAG: Changed: [[IN5]]:_(s8) = G_SUB [[NEW2:%[0-9]+]]:_, [[IN6]]:_
# CHECK-WORKLIST-DAG: Erasing: [[IN2]]:_(s32) = G_SEXT [[IN0]]:_(s8)
# CHECK-WORKLIST-DAG: Changed: [[IN2]]:_(s32) = G_SEXTLOAD [[IN1]]:_(p0){{.*}} :: (load 1 from %ir.addr)
# CHECK-WORKLIST-DAG: Changed: [[IN2]]:_(s32) = G_SEXTLOAD [[IN1]]:_(p0){{.*}} :: (load (s8) from %ir.addr)
# CHECK-WORKLIST-DAG: Created: [[NEW1]]:_(s8) = G_TRUNC [[IN2]]:_(s32)
# CHECK-WORKLIST-DAG: Created: [[NEW2]]:_(s8) = G_TRUNC [[IN2]]:_(s32)
# CHECK-WORKLIST: Try combining
@ -140,7 +140,7 @@ body: |
; CHECK: [[T0:%[0-9]+]]:_(s32) = G_SEXTLOAD
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
%2:_(s8) = G_LOAD %0 :: (load (s8) from %ir.addr)
; CHECK: [[T4:%[0-9]+]]:_(s8) = G_TRUNC [[T0]](s32)
%3:_(s32) = G_SEXT %2
%4:_(s32) = G_CONSTANT i32 1
@ -178,7 +178,7 @@ body: |
; CHECK: [[T0:%[0-9]+]]:_(s32) = G_SEXTLOAD
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
%2:_(s8) = G_LOAD %0 :: (load (s8) from %ir.addr)
%3:_(s32) = G_CONSTANT i32 1
%4:_(s1) = G_ICMP intpred(ne), %1:_(s32), %3:_
G_BRCOND %4:_(s1), %bb.1
@ -211,9 +211,9 @@ body: |
$w0 = COPY %10
$w1 = COPY %11
# CHECK-WORKLIST-LABEL: Generic MI Combiner for: sink_to_phi_nondominating
# CHECK-WORKLIST: Try combining [[IN0:%[0-9]+]]:_(s8) = G_LOAD [[IN1:%[0-9]+]]:_(p0){{.*}} :: (load 1 from %ir.addr)
# CHECK-WORKLIST: Try combining [[IN0:%[0-9]+]]:_(s8) = G_LOAD [[IN1:%[0-9]+]]:_(p0){{.*}} :: (load (s8) from %ir.addr)
# CHECK-WORKLIST: Preferred use is: [[IN2:%[0-9]+]]:_(s32) = G_SEXT [[IN0]]:_(s8)
# CHECK-WORKLIST-DAG: Changing: [[IN0]]:_(s8) = G_LOAD [[IN1]]:_(p0){{.*}} :: (load 1 from %ir.addr)
# CHECK-WORKLIST-DAG: Changing: [[IN0]]:_(s8) = G_LOAD [[IN1]]:_(p0){{.*}} :: (load (s8) from %ir.addr)
# CHECK-WORKLIST-DAG: Creating: G_TRUNC
# CHECK-WORKLIST-DAG: Changing: [[IN3:%[0-9]+]]:_(s8) = G_ADD [[IN0]]:_, [[IN4:%[0-9]+]]:_
# CHECK-WORKLIST-DAG: Changed: [[IN3]]:_(s8) = G_ADD [[OUT1:%[0-9]+]]:_, [[IN4]]:_
@ -221,7 +221,7 @@ body: |
# CHECK-WORKLIST-DAG: Changing: [[IN5:%[0-9]+]]:_(s8) = G_SUB [[IN0]]:_, [[IN6:%[0-9]+]]:_
# CHECK-WORKLIST-DAG: Changed: [[IN5]]:_(s8) = G_SUB [[OUT2:%[0-9]+]]:_, [[IN6]]:_
# CHECK-WORKLIST-DAG: Erasing: [[IN2]]:_(s32) = G_SEXT [[IN0]]:_(s8)
# CHECK-WORKLIST-DAG: Changed: [[IN2]]:_(s32) = G_SEXTLOAD [[IN1]]:_(p0){{.*}} :: (load 1 from %ir.addr)
# CHECK-WORKLIST-DAG: Changed: [[IN2]]:_(s32) = G_SEXTLOAD [[IN1]]:_(p0){{.*}} :: (load (s8) from %ir.addr)
# CHECK-WORKLIST-DAG: Created: [[OUT1]]:_(s8) = G_TRUNC [[IN2]]:_(s32)
# CHECK-WORKLIST-DAG: Created: [[OUT2]]:_(s8) = G_TRUNC [[IN2]]:_(s32)
# CHECK-WORKLIST: Try combining
@ -238,7 +238,7 @@ body: |
; CHECK: [[T0:%[0-9]+]]:_(s32) = G_SEXTLOAD
%0:_(p0) = COPY $x0
%1:_(s32) = COPY $w1
%2:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
%2:_(s8) = G_LOAD %0 :: (load (s8) from %ir.addr)
%3:_(s32) = G_SEXT %2
%4:_(s32) = G_CONSTANT i32 1
%5:_(s1) = G_ICMP intpred(ne), %1:_(s32), %4:_
@ -287,10 +287,10 @@ body: |
bb.0.entry:
liveins: $x0, $w1
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
; CHECK: %1:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr)
G_STORE %1(s8), %0(p0) :: (store 1 into %ir.addr)
; CHECK: G_STORE %1(s8), %0(p0) :: (store 1 into %ir.addr)
%1:_(s8) = G_LOAD %0 :: (load (s8) from %ir.addr)
; CHECK: %1:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr)
G_STORE %1(s8), %0(p0) :: (store (s8) into %ir.addr)
; CHECK: G_STORE %1(s8), %0(p0) :: (store (s8) into %ir.addr)
...
---
@ -302,8 +302,8 @@ body: |
bb.0.entry:
liveins: $x0, $w1
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr)
; CHECK: %1:_(s8) = G_LOAD %0(p0) :: (load 1 from %ir.addr)
%1:_(s8) = G_LOAD %0 :: (load (s8) from %ir.addr)
; CHECK: %1:_(s8) = G_LOAD %0(p0) :: (load (s8) from %ir.addr)
G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.hint), %1(s8)
; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.hint), %1(s8)
...

@ -24,12 +24,12 @@ body: |
; CHECK-LABEL: name: test
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.ptr)
; CHECK: [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[COPY]](p0) :: (load (s1) from %ir.ptr)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
; CHECK: $w0 = COPY [[ZEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
%1:_(s1) = G_LOAD %0(p0) :: (load 1 from %ir.ptr)
%1:_(s1) = G_LOAD %0(p0) :: (load (s1) from %ir.ptr)
%2:_(s8) = G_ZEXT %1(s1)
%3:_(s32) = G_ANYEXT %2(s8)
$w0 = COPY %3(s32)

@ -7,10 +7,10 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_anyext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[LOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ANYEXT %1
$w0 = COPY %2
...
@ -22,10 +22,10 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_anyext_with_copy
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[LOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s8) = COPY %1
%3:_(s32) = G_ANYEXT %1
$w0 = COPY %3
@ -38,10 +38,10 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_signext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_SEXT %1
$w0 = COPY %2
...
@ -53,10 +53,10 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_zeroext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[ZEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ZEXT %1
$w0 = COPY %2
...
@ -68,11 +68,11 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_2anyext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[LOAD]](s32)
; CHECK: $w1 = COPY [[LOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ANYEXT %1
%3:_(s32) = G_ANYEXT %1
$w0 = COPY %2
@ -86,11 +86,11 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_1anyext_1signext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
; CHECK: $w1 = COPY [[SEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ANYEXT %1
%3:_(s32) = G_SEXT %1
$w0 = COPY %2
@ -104,7 +104,7 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_1xor_1signext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[SEXTLOAD]](s32)
; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; CHECK: [[XOR:%[0-9]+]]:_(s8) = G_XOR [[TRUNC]], [[C]]
@ -112,7 +112,7 @@ body: |
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: $w1 = COPY [[SEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s8) = G_CONSTANT i8 -1
%3:_(s8) = G_XOR %1, %2
%5:_(s32) = G_ANYEXT %3
@ -128,11 +128,11 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_1anyext_1zeroext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: $w0 = COPY [[ZEXTLOAD]](s32)
; CHECK: $w1 = COPY [[ZEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ANYEXT %1
%3:_(s32) = G_ZEXT %1
$w0 = COPY %2
@ -146,13 +146,13 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_1signext_1zeroext
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[SEXTLOAD]](s32)
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
; CHECK: $w0 = COPY [[ZEXT]](s32)
; CHECK: $w1 = COPY [[SEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ZEXT %1
%3:_(s32) = G_SEXT %1
$w0 = COPY %2
@ -166,12 +166,12 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_1anyext64_1signext32
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SEXTLOAD]](s32)
; CHECK: $x0 = COPY [[ANYEXT]](s64)
; CHECK: $w1 = COPY [[SEXTLOAD]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s64) = G_ANYEXT %1
%3:_(s32) = G_SEXT %1
$x0 = COPY %2
@ -185,13 +185,13 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_1anyext32_1signext64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[SEXTLOAD]](s64)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: $x1 = COPY [[SEXTLOAD]](s64)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ANYEXT %1
%3:_(s64) = G_SEXT %1
$w0 = COPY %2
@ -205,7 +205,7 @@ body: |
liveins: $x0
; CHECK-LABEL: name: test_2anyext32_1signext64
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[SEXTLOAD]](s64)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
@ -213,7 +213,7 @@ body: |
; CHECK: $x1 = COPY [[SEXTLOAD]](s64)
; CHECK: $w2 = COPY [[ANYEXT1]](s32)
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ANYEXT %1
%3:_(s64) = G_SEXT %1
%4:_(s32) = G_ANYEXT %1
@ -229,14 +229,14 @@ body: |
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
; CHECK: G_BR %bb.1
; CHECK: bb.1:
; CHECK: $w0 = COPY [[LOAD]](s32)
bb.0:
liveins: $x0
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
G_BR %bb.1
bb.1:
%2:_(s32) = G_ANYEXT %1
@ -250,14 +250,14 @@ body: |
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: G_BR %bb.1
; CHECK: bb.1:
; CHECK: $w0 = COPY [[SEXTLOAD]](s32)
bb.0:
liveins: $x0
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
G_BR %bb.1
bb.1:
%2:_(s32) = G_SEXT %1
@ -271,14 +271,14 @@ body: |
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: G_BR %bb.1
; CHECK: bb.1:
; CHECK: $w0 = COPY [[ZEXTLOAD]](s32)
bb.0:
liveins: $x0
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
G_BR %bb.1
bb.1:
%2:_(s32) = G_ZEXT %1
@ -292,7 +292,7 @@ body: |
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 1)
; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s8))
; CHECK: G_BR %bb.1
; CHECK: bb.1:
; CHECK: $w0 = COPY [[LOAD]](s32)
@ -300,7 +300,7 @@ body: |
bb.0:
liveins: $x0
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
%2:_(s32) = G_ANYEXT %1
G_BR %bb.1
bb.1:
@ -316,7 +316,7 @@ body: |
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: G_BR %bb.1
; CHECK: bb.1:
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SEXTLOAD]](s32)
@ -325,7 +325,7 @@ body: |
bb.0:
liveins: $x0
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
G_BR %bb.1
bb.1:
%2:_(s64) = G_ANYEXT %1
@ -341,7 +341,7 @@ body: |
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: G_BR %bb.1
; CHECK: bb.1:
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[SEXTLOAD]](s64)
@ -351,7 +351,7 @@ body: |
bb.0:
liveins: $x0
%0:_(p0) = COPY $x0
%1:_(s8) = G_LOAD %0 :: (load 1)
%1:_(s8) = G_LOAD %0 :: (load (s8))
G_BR %bb.1
bb.1:
%2:_(s32) = G_ANYEXT %1
@ -367,7 +367,7 @@ body: |
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load 1)
; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s64) = G_SEXTLOAD [[COPY]](p0) :: (load (s8))
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[SEXTLOAD]](s64)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s8)
; CHECK: G_BR %bb.1
@@ -380,7 +380,7 @@ body: |
bb.0:
liveins: $x0
%0:_(p0) = COPY $x0
-%1:_(s8) = G_LOAD %0 :: (load 1)
+%1:_(s8) = G_LOAD %0 :: (load (s8))
%4:_(s32) = G_ANYEXT %1
G_BR %bb.1
bb.1:
@@ -403,12 +403,12 @@ body: |
; CHECK-LABEL: name: test_atomic
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load unordered 2)
+; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load unordered (s16))
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
; CHECK: $w0 = COPY [[ZEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
-%1:_(s16) = G_LOAD %0(p0) :: (load unordered 2)
+%1:_(s16) = G_LOAD %0(p0) :: (load unordered (s16))
%2:_(s32) = G_ZEXT %1(s16)
$w0 = COPY %2(s32)
RET_ReallyLR implicit $w0


@@ -12,14 +12,14 @@ body: |
; CHECK-LABEL: name: icmp_trunc_sextload
; CHECK: liveins: $x0
; CHECK: %v:_(p0) = COPY $x0
-; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load 4)
+; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: %cmp:_(s1) = G_ICMP intpred(ne), %load(s64), [[C]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%v:_(p0) = COPY $x0
-%load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load 4)
+%load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load (s32))
%trunc:_(s32) = G_TRUNC %load(s64)
%zero:_(s32) = G_CONSTANT i32 0
%cmp:_(s1) = G_ICMP intpred(ne), %trunc(s32), %zero
@@ -37,14 +37,14 @@ body: |
; CHECK-LABEL: name: icmp_trunc_sextload_eq
; CHECK: liveins: $x0
; CHECK: %v:_(p0) = COPY $x0
-; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load 4)
+; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: %cmp:_(s1) = G_ICMP intpred(eq), %load(s64), [[C]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT %cmp(s1)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%v:_(p0) = COPY $x0
-%load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load 4)
+%load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load (s32))
%trunc:_(s32) = G_TRUNC %load(s64)
%zero:_(s32) = G_CONSTANT i32 0
%cmp:_(s1) = G_ICMP intpred(eq), %trunc(s32), %zero
@@ -62,7 +62,7 @@ body: |
; CHECK-LABEL: name: icmp_trunc_sextload_wrongpred
; CHECK: liveins: $x0
; CHECK: %v:_(p0) = COPY $x0
-; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load 4)
+; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
; CHECK: %trunc:_(s32) = G_TRUNC %load(s64)
; CHECK: %zero:_(s32) = G_CONSTANT i32 0
; CHECK: %cmp:_(s1) = G_ICMP intpred(slt), %trunc(s32), %zero
@@ -70,7 +70,7 @@ body: |
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%v:_(p0) = COPY $x0
-%load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load 4)
+%load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load (s32))
%trunc:_(s32) = G_TRUNC %load(s64)
%zero:_(s32) = G_CONSTANT i32 0
%cmp:_(s1) = G_ICMP intpred(slt), %trunc(s32), %zero
@@ -88,7 +88,7 @@ body: |
; CHECK-LABEL: name: icmp_trunc_sextload_extend_mismatch
; CHECK: liveins: $x0
; CHECK: %v:_(p0) = COPY $x0
-; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load 4)
+; CHECK: %load:_(s64) = G_SEXTLOAD %v(p0) :: (load (s32))
; CHECK: %trunc:_(s16) = G_TRUNC %load(s64)
; CHECK: %zero:_(s16) = G_CONSTANT i16 0
; CHECK: %cmp:_(s1) = G_ICMP intpred(ne), %trunc(s16), %zero
@@ -96,7 +96,7 @@ body: |
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%v:_(p0) = COPY $x0
-%load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load 4)
+%load:_(s64) = G_SEXTLOAD %v:_(p0) :: (load (s32))
%trunc:_(s16) = G_TRUNC %load(s64)
%zero:_(s16) = G_CONSTANT i16 0
%cmp:_(s1) = G_ICMP intpred(ne), %trunc(s16), %zero


@@ -22,16 +22,16 @@ body: |
; CHECK-LABEL: name: not_necessarily_equiv_loads
; CHECK: %ptr:_(p0) = G_GLOBAL_VALUE @g
-; CHECK: %load1:_(s32) = G_LOAD %ptr(p0) :: (load 4 from @g)
-; CHECK: %load2:_(s32) = G_LOAD %ptr(p0) :: (load 4 from @g)
+; CHECK: %load1:_(s32) = G_LOAD %ptr(p0) :: (load (s32) from @g)
+; CHECK: %load2:_(s32) = G_LOAD %ptr(p0) :: (load (s32) from @g)
; CHECK: %or:_(s32) = G_OR %load2, %load1
-; CHECK: G_STORE %or(s32), %ptr(p0) :: (store 4 into @g)
+; CHECK: G_STORE %or(s32), %ptr(p0) :: (store (s32) into @g)
; CHECK: RET_ReallyLR
%ptr:_(p0) = G_GLOBAL_VALUE @g
-%load1:_(s32) = G_LOAD %ptr(p0) :: (load 4 from @g)
-%load2:_(s32) = G_LOAD %ptr(p0) :: (load 4 from @g)
+%load1:_(s32) = G_LOAD %ptr(p0) :: (load (s32) from @g)
+%load2:_(s32) = G_LOAD %ptr(p0) :: (load (s32) from @g)
%or:_(s32) = G_OR %load2, %load1
-G_STORE %or(s32), %ptr(p0) :: (store 4 into @g)
+G_STORE %or(s32), %ptr(p0) :: (store (s32) into @g)
RET_ReallyLR
...
@@ -46,14 +46,14 @@ body: |
; CHECK-LABEL: name: invariant_loads
; CHECK: %ptr:_(p0) = G_GLOBAL_VALUE @g
-; CHECK: %load2:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load 4 from @g)
-; CHECK: G_STORE %load2(s32), %ptr(p0) :: (store 4 into @g)
+; CHECK: %load2:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load (s32) from @g)
+; CHECK: G_STORE %load2(s32), %ptr(p0) :: (store (s32) into @g)
; CHECK: RET_ReallyLR
%ptr:_(p0) = G_GLOBAL_VALUE @g
-%load1:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load 4 from @g)
-%load2:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load 4 from @g)
+%load1:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load (s32) from @g)
+%load2:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load (s32) from @g)
%or:_(s32) = G_OR %load2, %load1
-G_STORE %or(s32), %ptr(p0) :: (store 4 into @g)
+G_STORE %or(s32), %ptr(p0) :: (store (s32) into @g)
RET_ReallyLR
...
@@ -68,15 +68,15 @@ body: |
; CHECK-LABEL: name: both_have_to_be_invariant
; CHECK: %ptr:_(p0) = G_GLOBAL_VALUE @g
-; CHECK: %load1:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load 4 from @g)
-; CHECK: %load2:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable load 4 from @g)
+; CHECK: %load1:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load (s32) from @g)
+; CHECK: %load2:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable load (s32) from @g)
; CHECK: %or:_(s32) = G_OR %load2, %load1
-; CHECK: G_STORE %or(s32), %ptr(p0) :: (store 4 into @g)
+; CHECK: G_STORE %or(s32), %ptr(p0) :: (store (s32) into @g)
; CHECK: RET_ReallyLR
%ptr:_(p0) = G_GLOBAL_VALUE @g
-%load1:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load 4 from @g)
-%load2:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable load 4 from @g)
+%load1:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable invariant load (s32) from @g)
+%load2:_(s32) = G_LOAD %ptr(p0) :: (dereferenceable load (s32) from @g)
%or:_(s32) = G_OR %load2, %load1
-G_STORE %or(s32), %ptr(p0) :: (store 4 into @g)
+G_STORE %or(s32), %ptr(p0) :: (store (s32) into @g)
RET_ReallyLR
...


@@ -122,15 +122,15 @@ body: |
; CHECK: G_BR %bb.1
; CHECK: bb.1:
; CHECK: successors: %bb.3(0x80000000)
-; CHECK: %ld1:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load 16)
+; CHECK: %ld1:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
; CHECK: G_BR %bb.3
; CHECK: bb.2:
; CHECK: successors: %bb.3(0x80000000)
-; CHECK: %ld2:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load 16)
+; CHECK: %ld2:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
; CHECK: bb.3:
; CHECK: %phi:_(<4 x s32>) = G_PHI %ld1(<4 x s32>), %bb.1, %ld2(<4 x s32>), %bb.2
; CHECK: %ext:_(<4 x s64>) = G_SEXT %phi(<4 x s32>)
-; CHECK: G_STORE %ext(<4 x s64>), %ptr(p0) :: (store 16)
+; CHECK: G_STORE %ext(<4 x s64>), %ptr(p0) :: (store (<4 x s64>))
; CHECK: RET_ReallyLR
bb.1.entry:
liveins: $x0, $q0, $q1
@@ -143,16 +143,16 @@ body: |
G_BR %bb.3
bb.2:
-%ld1:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load 16)
+%ld1:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
G_BR %bb.4
bb.3:
-%ld2:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load 16)
+%ld2:_(<4 x s32>) = G_LOAD %ptr(p0) :: (load (<4 x s32>))
bb.4:
%phi:_(<4 x s32>) = G_PHI %ld1(<4 x s32>), %bb.2, %ld2(<4 x s32>), %bb.3
%ext:_(<4 x s64>) = G_SEXT %phi
-G_STORE %ext(<4 x s64>), %ptr(p0) :: (store 16)
+G_STORE %ext(<4 x s64>), %ptr(p0) :: (store (<4 x s64>))
RET_ReallyLR
...


@@ -13,12 +13,12 @@ body: |
; CHECK-LABEL: name: sextload_from_inreg
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s16) = G_SEXTLOAD [[COPY]](p0) :: (load 1, align 2)
+; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s16) = G_SEXTLOAD [[COPY]](p0) :: (load (s8), align 2)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXTLOAD]](s16)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
-%1:_(s16) = G_LOAD %0(p0) :: (load 2)
+%1:_(s16) = G_LOAD %0(p0) :: (load (s16))
%2:_(s16) = G_SEXT_INREG %1, 8
%3:_(s32) = G_ANYEXT %2(s16)
$w0 = COPY %3(s32)
@@ -39,14 +39,14 @@ body: |
; CHECK-LABEL: name: sextload_from_inreg_across_store
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s16) = G_SEXTLOAD [[COPY]](p0) :: (load 1, align 2)
-; CHECK: G_STORE [[COPY]](p0), [[COPY]](p0) :: (store 8)
+; CHECK: [[SEXTLOAD:%[0-9]+]]:_(s16) = G_SEXTLOAD [[COPY]](p0) :: (load (s8), align 2)
+; CHECK: G_STORE [[COPY]](p0), [[COPY]](p0) :: (store (p0))
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXTLOAD]](s16)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
-%1:_(s16) = G_LOAD %0(p0) :: (load 2)
-G_STORE %0(p0), %0(p0) :: (store 8)
+%1:_(s16) = G_LOAD %0(p0) :: (load (s16))
+G_STORE %0(p0), %0(p0) :: (store (p0))
%2:_(s16) = G_SEXT_INREG %1, 8
%3:_(s32) = G_ANYEXT %2(s16)
$w0 = COPY %3(s32)
@@ -66,12 +66,12 @@ body: |
; CHECK-LABEL: name: non_pow_2_inreg
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4)
+; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load (s32))
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[LOAD]], 24
; CHECK: $w0 = COPY [[SEXT_INREG]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
-%1:_(s32) = G_LOAD %0(p0) :: (load 4)
+%1:_(s32) = G_LOAD %0(p0) :: (load (s32))
%2:_(s32) = G_SEXT_INREG %1, 24
$w0 = COPY %2(s32)
RET_ReallyLR implicit $w0
@@ -90,13 +90,13 @@ body: |
; CHECK-LABEL: name: atomic
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load acquire 2)
+; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (load acquire (s16))
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s16) = G_SEXT_INREG [[LOAD]], 8
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXT_INREG]](s16)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
-%1:_(s16) = G_LOAD %0(p0) :: (load acquire 2)
+%1:_(s16) = G_LOAD %0(p0) :: (load acquire (s16))
%2:_(s16) = G_SEXT_INREG %1, 8
%3:_(s32) = G_ANYEXT %2(s16)
$w0 = COPY %3(s32)
@@ -116,13 +116,13 @@ body: |
; CHECK-LABEL: name: volatile
; CHECK: liveins: $x0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (volatile load 2)
+; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[COPY]](p0) :: (volatile load (s16))
; CHECK: [[SEXT_INREG:%[0-9]+]]:_(s16) = G_SEXT_INREG [[LOAD]], 8
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SEXT_INREG]](s16)
; CHECK: $w0 = COPY [[ANYEXT]](s32)
; CHECK: RET_ReallyLR implicit $w0
%0:_(p0) = COPY $x0
-%1:_(s16) = G_LOAD %0(p0) :: (volatile load 2)
+%1:_(s16) = G_LOAD %0(p0) :: (volatile load (s16))
%2:_(s16) = G_SEXT_INREG %1, 8
%3:_(s32) = G_ANYEXT %2(s16)
$w0 = COPY %3(s32)


@@ -29,7 +29,7 @@ body: |
; CHECK: [[FCVTHSr:%[0-9]+]]:fpr16 = FCVTHSr [[COPY]]
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:fpr32 = SUBREG_TO_REG 0, [[FCVTHSr]], %subreg.hsub
; CHECK: [[COPY1:%[0-9]+]]:gpr32all = COPY [[SUBREG_TO_REG]]
-; CHECK: STRHHui [[PHI]], [[DEF1]], 0 :: (store 2 into `half* undef`)
+; CHECK: STRHHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `half* undef`)
; CHECK: B %bb.2
bb.0:
successors: %bb.1(0x80000000)
@@ -50,7 +50,7 @@ body: |
%3:gpr(s16) = G_PHI %1(s16), %bb.1, %5(s16), %bb.2
%5:fpr(s16) = G_FPTRUNC %8(s32)
-G_STORE %3(s16), %4(p0) :: (store 2 into `half* undef`)
+G_STORE %3(s16), %4(p0) :: (store (s16) into `half* undef`)
G_BR %bb.2
...
@@ -83,7 +83,7 @@ body: |
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[PHI:%[0-9]+]]:fpr16 = PHI %7, %bb.2, [[COPY2]], %bb.1
; CHECK: [[FCVTHSr:%[0-9]+]]:fpr16 = FCVTHSr [[COPY]]
-; CHECK: STRHui [[PHI]], [[DEF1]], 0 :: (store 2 into `half* undef`)
+; CHECK: STRHui [[PHI]], [[DEF1]], 0 :: (store (s16) into `half* undef`)
; CHECK: B %bb.2
bb.0:
successors: %bb.1(0x80000000)
@@ -104,7 +104,7 @@ body: |
%3:fpr(s16) = G_PHI %5(s16), %bb.2, %1(s16), %bb.1
%5:fpr(s16) = G_FPTRUNC %8(s32)
-G_STORE %3(s16), %4(p0) :: (store 2 into `half* undef`)
+G_STORE %3(s16), %4(p0) :: (store (s16) into `half* undef`)
G_BR %bb.2
...
@@ -154,10 +154,10 @@ body: |
; CHECK: %use_gp_phi1:gpr32 = PHI %gpr_1, %bb.0, %gp_phi1, %bb.4
; CHECK: %use_gp_phi2:gpr32 = PHI %gpr_1, %bb.0, %gp_phi2, %bb.4
; CHECK: %use_gp_phi3:gpr32 = PHI %gpr_1, %bb.0, %gp_phi3, %bb.4
-; CHECK: STRHHui %use_fp_phi, %ptr, 0 :: (store 2)
-; CHECK: STRHHui %use_gp_phi1, %ptr, 0 :: (store 2)
-; CHECK: STRHHui %use_gp_phi2, %ptr, 0 :: (store 2)
-; CHECK: STRHHui %use_gp_phi3, %ptr, 0 :: (store 2)
+; CHECK: STRHHui %use_fp_phi, %ptr, 0 :: (store (s16))
+; CHECK: STRHHui %use_gp_phi1, %ptr, 0 :: (store (s16))
+; CHECK: STRHHui %use_gp_phi2, %ptr, 0 :: (store (s16))
+; CHECK: STRHHui %use_gp_phi3, %ptr, 0 :: (store (s16))
; CHECK: RET_ReallyLR
bb.1:
successors: %bb.2, %bb.6
@@ -187,9 +187,9 @@ body: |
%use_gp_phi1:gpr(s16) = G_PHI %gpr_1(s16), %bb.1, %gp_phi1(s16), %bb.5
%use_gp_phi2:gpr(s16) = G_PHI %gpr_1(s16), %bb.1, %gp_phi2(s16), %bb.5
%use_gp_phi3:gpr(s16) = G_PHI %gpr_1(s16), %bb.1, %gp_phi3(s16), %bb.5
-G_STORE %use_fp_phi(s16), %ptr(p0) :: (store 2)
-G_STORE %use_gp_phi1(s16), %ptr(p0) :: (store 2)
-G_STORE %use_gp_phi2(s16), %ptr(p0) :: (store 2)
-G_STORE %use_gp_phi3(s16), %ptr(p0) :: (store 2)
+G_STORE %use_fp_phi(s16), %ptr(p0) :: (store (s16))
+G_STORE %use_gp_phi1(s16), %ptr(p0) :: (store (s16))
+G_STORE %use_gp_phi2(s16), %ptr(p0) :: (store (s16))
+G_STORE %use_gp_phi3(s16), %ptr(p0) :: (store (s16))
RET_ReallyLR
...

Some files were not shown because too many files have changed in this diff.