diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index 6b6ce4a2009..08021706fe7 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -521,6 +521,18 @@ public:
   MachineInstrBuilder buildLoad(unsigned Res, unsigned Addr,
                                 MachineMemOperand &MMO);
 
+  /// Build and insert `Res = <opcode> Addr, MMO`.
+  ///
+  /// Loads the value stored at \p Addr. Puts the result in \p Res.
+  ///
+  /// \pre setBasicBlock or setMI must have been called.
+  /// \pre \p Res must be a generic virtual register.
+  /// \pre \p Addr must be a generic virtual register with pointer type.
+  ///
+  /// \return a MachineInstrBuilder for the newly created instruction.
+  MachineInstrBuilder buildLoadInstr(unsigned Opcode, unsigned Res,
+                                     unsigned Addr, MachineMemOperand &MMO);
+
   /// Build and insert `G_STORE Val, Addr, MMO`.
   ///
   /// Stores the value \p Val to \p Addr.
diff --git a/include/llvm/Support/TargetOpcodes.def b/include/llvm/Support/TargetOpcodes.def
index 8ab7348ca6d..1606e7653ba 100644
--- a/include/llvm/Support/TargetOpcodes.def
+++ b/include/llvm/Support/TargetOpcodes.def
@@ -265,9 +265,15 @@ HANDLE_TARGET_OPCODE(G_INTTOPTR)
 /// COPY is the relevant instruction.
 HANDLE_TARGET_OPCODE(G_BITCAST)
 
-/// Generic load.
+/// Generic load (including anyext load)
 HANDLE_TARGET_OPCODE(G_LOAD)
 
+/// Generic signext load
+HANDLE_TARGET_OPCODE(G_SEXTLOAD)
+
+/// Generic zeroext load
+HANDLE_TARGET_OPCODE(G_ZEXTLOAD)
+
 /// Generic store.
 HANDLE_TARGET_OPCODE(G_STORE)
 
diff --git a/include/llvm/Target/GenericOpcodes.td b/include/llvm/Target/GenericOpcodes.td
index 1af6a9ab9f2..89516b3ee75 100644
--- a/include/llvm/Target/GenericOpcodes.td
+++ b/include/llvm/Target/GenericOpcodes.td
@@ -482,6 +482,22 @@ def G_LOAD : GenericInstruction {
   let mayLoad = 1;
 }
 
+// Generic sign-extended load. Expects a MachineMemOperand in addition to explicit operands.
+def G_SEXTLOAD : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins ptype1:$addr);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+}
+
+// Generic zero-extended load. Expects a MachineMemOperand in addition to explicit operands.
+def G_ZEXTLOAD : GenericInstruction {
+  let OutOperandList = (outs type0:$dst);
+  let InOperandList = (ins ptype1:$addr);
+  let hasSideEffects = 0;
+  let mayLoad = 1;
+}
+
 // Generic store. Expects a MachineMemOperand in addition to explicit operands.
 def G_STORE : GenericInstruction {
   let OutOperandList = (outs);
diff --git a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 60901e09f0a..e4e5e0c6022 100644
--- a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -732,17 +732,19 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
     MI.eraseFromParent();
     return Legalized;
   }
-  case TargetOpcode::G_LOAD: {
+  case TargetOpcode::G_LOAD:
     // For some types like i24, we might try to widen to i32. To properly handle
     // this we should be using a dedicated extending load, until then avoid
     // trying to legalize.
     if (alignTo(MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(), 8) !=
         WideTy.getSizeInBits())
       return UnableToLegalize;
-
+    LLVM_FALLTHROUGH;
+  case TargetOpcode::G_SEXTLOAD:
+  case TargetOpcode::G_ZEXTLOAD: {
     unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
-    MIRBuilder.buildLoad(DstExt, MI.getOperand(1).getReg(),
-                         **MI.memoperands_begin());
+    MIRBuilder.buildLoadInstr(MI.getOpcode(), DstExt, MI.getOperand(1).getReg(),
+                              **MI.memoperands_begin());
     MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), DstExt);
     MI.eraseFromParent();
     return Legalized;
@@ -1030,6 +1032,44 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
     MI.eraseFromParent();
     return Legalized;
   }
+  case TargetOpcode::G_LOAD:
+  case TargetOpcode::G_SEXTLOAD:
+  case TargetOpcode::G_ZEXTLOAD: {
+    // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
+    unsigned DstReg = MI.getOperand(0).getReg();
+    unsigned PtrReg = MI.getOperand(1).getReg();
+    LLT DstTy = MRI.getType(DstReg);
+    auto &MMO = **MI.memoperands_begin();
+
+    if (DstTy.getSizeInBits() == MMO.getSize() /* in bytes */ * 8) {
+      MIRBuilder.buildLoad(DstReg, PtrReg, MMO);
+      MI.eraseFromParent();
+      return Legalized;
+    }
+
+    if (DstTy.isScalar()) {
+      unsigned TmpReg = MRI.createGenericVirtualRegister(
+          LLT::scalar(MMO.getSize() /* in bytes */ * 8));
+      MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
+      switch (MI.getOpcode()) {
+      default:
+        llvm_unreachable("Unexpected opcode");
+      case TargetOpcode::G_LOAD:
+        MIRBuilder.buildAnyExt(DstReg, TmpReg);
+        break;
+      case TargetOpcode::G_SEXTLOAD:
+        MIRBuilder.buildSExt(DstReg, TmpReg);
+        break;
+      case TargetOpcode::G_ZEXTLOAD:
+        MIRBuilder.buildZExt(DstReg, TmpReg);
+        break;
+      }
+      MI.eraseFromParent();
+      return Legalized;
+    }
+
+    return UnableToLegalize;
+  }
   }
 }
 
diff --git a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 96d95673914..273044bad75 100644
--- a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -278,10 +278,16 @@ MachineInstrBuilder MachineIRBuilderBase::buildBrCond(unsigned Tst,
 
 MachineInstrBuilder MachineIRBuilderBase::buildLoad(unsigned Res, unsigned Addr,
                                                     MachineMemOperand &MMO) {
+  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
+}
+
+MachineInstrBuilder
+MachineIRBuilderBase::buildLoadInstr(unsigned Opcode, unsigned Res,
+                                     unsigned Addr, MachineMemOperand &MMO) {
   assert(getMRI()->getType(Res).isValid() && "invalid operand type");
   assert(getMRI()->getType(Addr).isPointer() && "invalid operand type");
 
-  return buildInstr(TargetOpcode::G_LOAD)
+  return buildInstr(Opcode)
       .addDef(Res)
       .addUse(Addr)
       .addMemOperand(&MMO);
diff --git a/lib/Target/AArch64/AArch64LegalizerInfo.cpp b/lib/Target/AArch64/AArch64LegalizerInfo.cpp
index 0742c76e8f3..1a1f9adf46f 100644
--- a/lib/Target/AArch64/AArch64LegalizerInfo.cpp
+++ b/lib/Target/AArch64/AArch64LegalizerInfo.cpp
@@ -135,6 +135,9 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
       .maxScalarIf(typeInSet(1, {s64}), 0, s32)
       .widenScalarToNextPow2(0);
 
+  getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
+    .lower();
+
   getActionDefinitionsBuilder({G_LOAD, G_STORE})
       .legalForTypesWithMemSize({{s8, p0, 8},
                                  {s16, p0, 16},
@@ -147,6 +150,9 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
       .unsupportedIfMemSizeNotPow2()
       .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0)
+      .lowerIf([=](const LegalityQuery &Query) {
+        return Query.Types[0].getSizeInBits() != Query.MMODescrs[0].Size * 8;
+      })
       .clampNumElements(0, v2s32, v2s32);
 
   // Constants
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-extload.mir b/test/CodeGen/AArch64/GlobalISel/legalize-extload.mir
new file mode 100644
index 00000000000..c96788ac9b9
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-extload.mir
@@ -0,0 +1,25 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - -verify-machineinstrs | FileCheck %s
+
+--- |
+  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64--"
+  define void @test_extload(i8* %addr) {
+  entry:
+    ret void
+  }
+...
+
+---
+name: test_extload
+body: |
+  bb.0.entry:
+    liveins: $x0
+    ; CHECK-LABEL: name: test_extload
+    ; CHECK: [[T0:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[T1:%[0-9]+]]:_(s8) = G_LOAD [[T0]](p0) :: (load 1 from %ir.addr)
+    ; CHECK: [[T2:%[0-9]+]]:_(s32) = G_ANYEXT [[T1]](s8)
+    ; CHECK: $w0 = COPY [[T2]](s32)
+    %0:_(p0) = COPY $x0
+    %1:_(s32) = G_LOAD %0 :: (load 1 from %ir.addr)
+    $w0 = COPY %1
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-sextload.mir b/test/CodeGen/AArch64/GlobalISel/legalize-sextload.mir
new file mode 100644
index 00000000000..64dab815445
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-sextload.mir
@@ -0,0 +1,25 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - -verify-machineinstrs | FileCheck %s
+
+--- |
+  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64--"
+  define void @test_sextload(i8* %addr) {
+  entry:
+    ret void
+  }
+...
+
+---
+name: test_sextload
+body: |
+  bb.0.entry:
+    liveins: $x0
+    ; CHECK-LABEL: name: test_sextload
+    ; CHECK: [[T0:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[T1:%[0-9]+]]:_(s8) = G_LOAD [[T0]](p0) :: (load 1 from %ir.addr)
+    ; CHECK: [[T2:%[0-9]+]]:_(s32) = G_SEXT [[T1]](s8)
+    ; CHECK: $w0 = COPY [[T2]](s32)
+    %0:_(p0) = COPY $x0
+    %1:_(s32) = G_SEXTLOAD %0 :: (load 1 from %ir.addr)
+    $w0 = COPY %1
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-zextload.mir b/test/CodeGen/AArch64/GlobalISel/legalize-zextload.mir
new file mode 100644
index 00000000000..42bfb2dfb84
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-zextload.mir
@@ -0,0 +1,25 @@
+# RUN: llc -O0 -run-pass=legalizer -global-isel %s -o - -verify-machineinstrs | FileCheck %s
+
+--- |
+  target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64--"
+  define void @test_zextload(i8* %addr) {
+  entry:
+    ret void
+  }
+...
+
+---
+name: test_zextload
+body: |
+  bb.0.entry:
+    liveins: $x0
+    ; CHECK-LABEL: name: test_zextload
+    ; CHECK: [[T0:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[T1:%[0-9]+]]:_(s8) = G_LOAD [[T0]](p0) :: (load 1 from %ir.addr)
+    ; CHECK: [[T2:%[0-9]+]]:_(s32) = G_ZEXT [[T1]](s8)
+    ; CHECK: $w0 = COPY [[T2]](s32)
+    %0:_(p0) = COPY $x0
+    %1:_(s32) = G_ZEXTLOAD %0 :: (load 1 from %ir.addr)
+    $w0 = COPY %1
+...
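
Reviewer note (not part of the patch): the sketch below shows how another target could opt into the same handling of the new opcodes. MyTargetLegalizerInfo, MyTargetSubtarget, and the specific type/memory-size row are made up for illustration; the builder calls (getActionDefinitionsBuilder, lower, lowerIf, legalForTypesWithMemSize, computeTables) are the existing LegalizerInfo API already used in the AArch64 change above, so treat this as an assumption-laden example rather than part of the change.

// Illustrative only: a hypothetical target's LegalizerInfo constructor.
// G_SEXTLOAD/G_ZEXTLOAD are always expanded by LegalizerHelper::lower() into a
// memory-width G_LOAD followed by G_SEXT/G_ZEXT; G_LOAD falls back to the
// anyext lowering whenever the result is wider than the memory access.
MyTargetLegalizerInfo::MyTargetLegalizerInfo(const MyTargetSubtarget &ST) {
  using namespace TargetOpcode;
  const LLT S32 = LLT::scalar(32);
  const LLT P0 = LLT::pointer(0, 64);

  // Expand the extending loads unconditionally.
  getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
      .lower();

  // Keep same-width loads legal; lower anything whose result type is wider
  // than the memory size into G_LOAD + G_ANYEXT.
  getActionDefinitionsBuilder(G_LOAD)
      .legalForTypesWithMemSize({{S32, P0, 32}})
      .lowerIf([](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.MMODescrs[0].Size * 8;
      });

  computeTables();
}

With rules like these in place, the MIR in the legalize-*extload.mir tests above is exactly what the lowering produces: a narrow G_LOAD feeding a G_SEXT, G_ZEXT, or G_ANYEXT.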